repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
chen0510566/MissionPlanner | Lib/distutils/ccompiler.py | 50 | 49641 | """distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
__revision__ = "$Id$"
import sys
import os
import re
from distutils.errors import (CompileError, LinkError, UnknownFileError,
DistutilsPlatformError, DistutilsModuleError)
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_group
from distutils.util import split_quoted, execute
from distutils import log
_sysconfig = __import__('sysconfig')
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext, ar, ar_flags) = \
_sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO', 'AR',
'ARFLAGS')
if 'CC' in os.environ:
cc = os.environ['CC']
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = so_ext
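# Illustrative usage of customize_compiler() (a minimal sketch; "hello.c" is a
# hypothetical source file and a working Unix toolchain is assumed):
#
#     from distutils.ccompiler import new_compiler
#     cc = new_compiler()          # default compiler class for this platform
#     customize_compiler(cc)       # plug in CC/CFLAGS/LDSHARED/... from sysconfig
#     objects = cc.compile(["hello.c"])
#     cc.link_executable(objects, "hello")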
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
# * can't completely override the include or library search
# path, i.e. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__ (self, verbose=0, dry_run=0, force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if key not in self.executables:
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions(self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, i.e. either a (name, value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (isinstance(defn, tuple) and
(len (defn) == 1 or
(len (defn) == 2 and
(isinstance(defn[1], str) or defn[1] is None))) and
isinstance(defn[0], str)):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
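# Example of the macro bookkeeping above (a minimal sketch; "cc" stands for any
# concrete CCompiler instance):
#
#     cc.define_macro("NDEBUG")             # stores ("NDEBUG", None)
#     cc.define_macro("VERSION", "1.0")     # stores ("VERSION", "1.0")
#     cc.undefine_macro("NDEBUG")           # drops the definition, stores ("NDEBUG",)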
def add_include_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs(self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries(self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif not isinstance(outdir, str):
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if incdirs is None:
incdirs = self.include_dirs
elif isinstance(incdirs, (list, tuple)):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources,
strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, emxccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args(self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return output_dir, macros, include_dirs
def _fix_object_args(self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif isinstance(libraries, (list, tuple)):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif isinstance(library_dirs, (list, tuple)):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif isinstance(runtime_library_dirs, (list, tuple)):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
def detect_language(self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
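# Example of the precedence rules in detect_language() (a minimal sketch):
#
#     cc.detect_language(["a.c", "b.c"])            # -> "c"
#     cc.detect_language(["a.c", "b.cpp", "c.c"])   # -> "c++" (c++ precedes c
#                                                   #    in language_order)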
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
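# Example call to compile() (a minimal sketch; file names are hypothetical and a
# Unix-style concrete compiler is assumed):
#
#     objects = cc.compile(["src/foo.c", "src/bar.c"],
#                          output_dir="build",
#                          macros=[("NDEBUG", None), ("DEBUG",)],
#                          include_dirs=["include"],
#                          debug=1)
#     # -> ["build/src/foo.o", "build/src/bar.o"]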
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link(self, target_desc, objects, output_filename, output_dir=None,
libraries=None, library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None,
runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None,
build_temp=None, target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object(self, objects, output_filename, output_dir=None,
libraries=None, library_dirs=None,
runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None,
build_temp=None, target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable(self, objects, output_progname, output_dir=None,
libraries=None, library_dirs=None,
runtime_library_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
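# Example use of the link wrappers above (a minimal sketch; object files, library
# names and paths are hypothetical):
#
#     cc.link_shared_object(objects, "spam.so", libraries=["m"])
#     cc.link_executable(objects, "spam",
#                        libraries=["m"], library_dirs=["/usr/local/lib"])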
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option(self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname, includes=None, include_dirs=None,
libraries=None, library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
try:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
finally:
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
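# Example use of has_function() (a minimal sketch; assumes a working C toolchain
# and that the library name is meaningful on the target platform):
#
#     if cc.has_function("clock_gettime", libraries=["rt"]):
#         cc.define_macro("HAVE_CLOCK_GETTIME")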
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % (ext, src_name)
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib"):
raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
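# Example of the filename helpers on a Unix-style subclass (a minimal sketch; the
# exact results depend on the concrete compiler class):
#
#     cc.object_filenames(["foo/bar.c"])              # -> ["foo/bar.o"]
#     cc.library_filename("spam")                     # -> "libspam.a"
#     cc.library_filename("spam", lib_type="shared")  # -> "libspam.so"
#     cc.executable_filename("spam")                  # -> "spam"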
# -- Utility methods -----------------------------------------------
def announce(self, msg, level=1):
log.debug(msg)
def debug_print(self, msg):
from distutils.debug import DEBUG
if DEBUG:
print msg
def warn(self, msg):
sys.stderr.write("warning: %s\n" % msg)
def execute(self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn(self, cmd):
spawn(cmd, dry_run=self.dry_run)
def move_file(self, src, dst):
return move_file(src, dst, dry_run=self.dry_run)
def mkpath(self, name, mode=0777):
mkpath(name, mode, dry_run=self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
('os2emx', 'emx'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
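# Example (a minimal sketch): on a typical Linux build 'unix' is returned, on
# native Windows 'msvc':
#
#     get_default_compiler('posix', 'linux2')   # -> 'unix'
#     get_default_compiler('nt', 'win32')       # -> 'msvc'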
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'emx': ('emxccompiler', 'EMXCCompiler',
"EMX port of GNU C Compiler for OS/2"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass(None, dry_run, force)
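# Example use of new_compiler() (a minimal sketch; asking for a specific compiler
# only works if the corresponding distutils module is importable):
#
#     cc = new_compiler()                      # default for this platform
#     cc = new_compiler(compiler="mingw32")    # explicitly pick a compiler type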
def gen_preprocess_options(macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (isinstance(macro, tuple) and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
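# Example of the options generated (a minimal sketch):
#
#     gen_preprocess_options([("NDEBUG", None), ("VERSION", "1.0"), ("DEBUG",)],
#                            ["include", "/usr/local/include"])
#     # -> ['-DNDEBUG', '-DVERSION=1.0', '-UDEBUG',
#     #     '-Iinclude', '-I/usr/local/include']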
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries.
'libraries' and 'library_dirs' are, respectively, lists of library names
(not filenames!) and search directories. Returns a list of command-line
options suitable for use with some compiler (depending on the compiler
object passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts.extend(opt)
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
lib_dir, lib_name = os.path.split(lib)
if lib_dir != '':
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file is not None:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append(compiler.library_option(lib))
return lib_opts
| gpl-3.0 |
michaelkuty/django-oscar | tests/integration/basket/form_tests.py | 45 | 3927 | from django.test import TestCase
from django.conf import settings
import mock
from oscar.apps.basket import forms
from oscar.test import factories
class TestBasketLineForm(TestCase):
def setUp(self):
self.basket = factories.create_basket()
self.line = self.basket.all_lines()[0]
def mock_availability_return_value(self, is_available, reason=''):
policy = self.line.purchase_info.availability
policy.is_purchase_permitted = mock.MagicMock(
return_value=(is_available, reason))
def build_form(self, quantity=None):
if quantity is None:
quantity = self.line.quantity
return forms.BasketLineForm(
strategy=self.basket.strategy,
data={'quantity': quantity},
instance=self.line)
def test_enforces_availability_policy_for_valid_quantities(self):
self.mock_availability_return_value(True)
form = self.build_form()
self.assertTrue(form.is_valid())
def test_enforces_availability_policy_for_invalid_quantities(self):
self.mock_availability_return_value(False, "Some reason")
form = self.build_form()
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['quantity'], ['Some reason'])
def test_skips_availability_policy_for_zero_quantities(self):
self.mock_availability_return_value(True)
form = self.build_form(quantity=0)
self.assertTrue(form.is_valid())
def test_enforces_max_line_quantity(self):
invalid_qty = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD + 1
form = self.build_form(quantity=invalid_qty)
self.assertFalse(form.is_valid())
class TestAddToBasketForm(TestCase):
def test_allows_a_product_quantity_to_be_increased(self):
basket = factories.create_basket()
product = basket.all_lines()[0].product
# Add more of the same product
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertTrue(form.is_valid())
def test_checks_whether_passed_product_id_matches_a_real_product(self):
basket = factories.create_basket()
product = basket.all_lines()[0].product
# Add more of the same product
data = {'quantity': -1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
def test_checks_if_purchase_is_permitted(self):
basket = factories.BasketFactory()
product = factories.ProductFactory()
# Build a 4-level mock monster so we can force the return value of
# whether the product is available to buy. This is a serious code smell
# and needs to be remedied.
info = mock.Mock()
info.availability = mock.Mock()
info.availability.is_purchase_permitted = mock.Mock(
return_value=(False, "Not on your nelly!"))
basket.strategy.fetch_for_product = mock.Mock(
return_value=info)
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
self.assertEqual('Not on your nelly!', form.errors['__all__'][0])
def test_mixed_currency_baskets_are_not_permitted(self):
# Ensure basket is one currency
basket = mock.Mock()
basket.currency = 'GBP'
basket.num_items = 1
# Ensure new product has different currency
info = mock.Mock()
info.price.currency = 'EUR'
basket.strategy.fetch_for_product = mock.Mock(
return_value=info)
product = factories.ProductFactory()
data = {'quantity': 1}
form = forms.AddToBasketForm(
basket=basket, product=product, data=data)
self.assertFalse(form.is_valid())
| bsd-3-clause |
sofianehaddad/ot-svn | python/test/t_FORM_sensitivity.py | 2 | 3930 | #! /usr/bin/env python
from openturns import *
from math import *
def printNumericalPoint(point, digits):
oss = "["
eps = pow(0.1, digits)
for i in range(point.getDimension()):
if i == 0:
sep = ""
else:
sep = ","
if fabs(point[i]) < eps:
oss += sep + '%.6f' % fabs(point[i])
else:
oss += sep + '%.6f' % point[i]
sep = ","
oss += "]"
return oss
TESTPREAMBLE()
try:
# We create a numerical math function
# Analytical construction
inputFunc = Description(2)
inputFunc[0] = "x0"
inputFunc[1] = "x1"
outputFunc = Description(1)
outputFunc[0] = "y0"
formulas = Description(outputFunc.getSize())
formulas[0] = "-(6+x0^2-x1)"
print "formulas=", formulas
myFunction = NumericalMathFunction(inputFunc, outputFunc, formulas)
dim = myFunction.getInputDimension()
# We create a normal distribution point of dimension 1
mean = NumericalPoint(dim, 0.0)
# x0
mean[0] = 5.0
# x1
mean[1] = 2.1
sigma = NumericalPoint(dim, 0.0)
# x0
sigma[0] = 3.3
# x1
sigma[1] = 3.0
R = IdentityMatrix(dim)
#
testDistributions = DistributionCollection(2)
testDistributions[0] = Normal(mean, sigma, R)
marginals = DistributionCollection(2)
marginals[0] = testDistributions[0].getMarginal(0)
marginals[1] = testDistributions[0].getMarginal(1)
testDistributions[1] = ComposedDistribution(marginals, NormalCopula(R))
for i in range(1):
myDistribution = testDistributions[i]
# We name the components of the distribution
componentDescription = Description(dim)
componentDescription[0] = "Marginal 1"
componentDescription[1] = "Marginal 2"
myDistribution.setDescription(componentDescription)
# We create a 'usual' RandomVector from the Distribution
vect = RandomVector(myDistribution)
# We create a composite random vector
output = RandomVector(myFunction, vect)
outputDescription = Description(1)
outputDescription[0] = "Interest Variable 1"
output.setDescription(outputDescription)
# We create an Event from this RandomVector
myEvent = Event(output, Greater(), 0.0)
# We create a NearestPoint algorithm
myCobyla = Cobyla()
myCobyla.setSpecificParameters(CobylaSpecificParameters())
myCobyla.setMaximumIterationsNumber(100)
myCobyla.setMaximumAbsoluteError(1.0e-10)
myCobyla.setMaximumRelativeError(1.0e-10)
myCobyla.setMaximumResidualError(1.0e-10)
myCobyla.setMaximumConstraintError(1.0e-10)
print "myCobyla=", myCobyla
# We create a FORM algorithm
# The first parameter is a NearestPointAlgorithm
# The second parameter is an event
# The third parameter is a starting point for the design point research
myAlgo = FORM(myCobyla, myEvent, mean)
print "FORM=", myAlgo
# Perform the simulation
myAlgo.run()
# Stream out the result
result = FORMResult(myAlgo.getResult())
digits = 5
print "importance factors=", printNumericalPoint(result.getImportanceFactors(), digits)
print "Hasofer reliability index=%.6f" % result.getHasoferReliabilityIndex()
print "result=", result
# Hasofer Reliability Index Sensitivity
hasoferReliabilityIndexSensitivity = result.getHasoferReliabilityIndexSensitivity(
)
print "hasoferReliabilityIndexSensitivity = ", repr(hasoferReliabilityIndexSensitivity)
# Event Probability Sensitivity
eventProbabilitySensitivity = result.getEventProbabilitySensitivity()
print "eventProbabilitySensitivity = ", repr(eventProbabilitySensitivity)
except:
import sys
print "t_FORM_sensitivity.py", sys.exc_type, sys.exc_value
| mit |
cmshobe/landlab | landlab/io/esri_ascii.py | 3 | 17226 | #! /usr/bin/env python
"""Read/write data from an ESRI ASCII file into a RasterModelGrid.
ESRI ASCII functions
++++++++++++++++++++
.. autosummary::
~landlab.io.esri_ascii.read_asc_header
~landlab.io.esri_ascii.read_esri_ascii
~landlab.io.esri_ascii.write_esri_ascii
"""
import os
import pathlib
import re
import numpy as np
from landlab.utils import add_halo
_VALID_HEADER_KEYS = [
"ncols",
"nrows",
"xllcorner",
"xllcenter",
"yllcorner",
"yllcenter",
"cellsize",
"nodata_value",
]
_HEADER_KEY_REGEX_PATTERN = re.compile(r"\s*(?P<key>[a-zA-Z]\w+)")
_HEADER_REGEX_PATTERN = re.compile(r"\s*(?P<key>[a-zA-Z]\w+)\s+(?P<value>[\w.+-]+)")
_HEADER_VALUE_TESTS = {
"nrows": (int, lambda x: x > 0),
"ncols": (int, lambda x: x > 0),
"cellsize": (float, lambda x: x > 0),
"xllcorner": (float, lambda x: True),
"xllcenter": (float, lambda x: True),
"yllcorner": (float, lambda x: True),
"yllcenter": (float, lambda x: True),
"nodata_value": (float, lambda x: True),
}
class Error(Exception):
"""Base class for errors in this module."""
pass
class BadHeaderLineError(Error):
"""Raise this error for a bad header is line."""
def __init__(self, line):
self._line = line
def __str__(self):
return self._line # this line not yet tested
class MissingRequiredKeyError(Error):
"""Raise this error when a header is missing a required key."""
def __init__(self, key):
self._key = key
def __str__(self):
return self._key
class KeyTypeError(Error):
"""Raise this error when a header's key value is of the wrong type."""
def __init__(self, key, expected_type):
self._key = key
self._type = str(expected_type)
def __str__(self):
return "Unable to convert %s to %s" % (self._key, self._type)
class KeyValueError(Error):
"""Raise this error when a header's key value has a bad value."""
def __init__(self, key, message):
self._key = key
self._msg = message
def __str__(self):
return "%s: %s" % (self._key, self._msg) # this line not yet tested
class DataSizeError(Error):
"""Raise this error if the size of data does not match the header."""
def __init__(self, size, expected_size):
self._actual = size
self._expected = expected_size
def __str__(self):
return "%s != %s" % (self._actual, self._expected) # this line not yet tested
class MismatchGridDataSizeError(Error):
"""Raise this error if the data size does not match the grid size."""
def __init__(self, size, expected_size):
self._actual = size
self._expected = expected_size
def __str__(self):
return "(data size) %s != %s (grid size)" % (
self._actual,
self._expected,
) # this line not yet tested
class MismatchGridXYSpacing(Error):
"""Raise this error if the file cell size does not match the grid dx."""
def __init__(self, dx, expected_dx):
self._actual = dx
self._expected = expected_dx
def __str__(self):
return "(data dx) %s != %s (grid dx)" % (
self._actual,
self._expected,
) # this line not yet tested
class MismatchGridXYLowerLeft(Error):
"""Raise this error if the file lower left does not match the grid."""
def __init__(self, llc, expected_llc):
self._actual = llc
self._expected = expected_llc
def __str__(self):
return "(data lower-left) %s != %s (grid lower-left)" % (
self._actual,
self._expected,
) # this line not yet tested
def _parse_header_key_value(line):
"""Parse a header line into a key-value pair.
Parameters
----------
line : str
Header line.
Returns
-------
(str, str)
Header key-value pair
Raises
------
BadHeaderLineError
The is something wrong with the header line.
"""
match = _HEADER_KEY_REGEX_PATTERN.match(line)
if match is None:
return None
# raise BadHeaderLineError(line)
match = _HEADER_REGEX_PATTERN.match(line)
if match is None:
raise BadHeaderLineError(line)
(key, value) = (match.group("key").lower(), match.group("value"))
if key in _VALID_HEADER_KEYS:
return (key, value)
else:
raise BadHeaderLineError(line)
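# Example of header-line parsing (a minimal sketch):
#
#     _parse_header_key_value("ncols 200")       # -> ("ncols", "200")
#     _parse_header_key_value("12.5 13.0 14.2")  # -> None (data line, not header)
#     _parse_header_key_value("rows 100")        # raises BadHeaderLineError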
def _header_lines(asc_file):
"""Iterate over header lines for a ESRI ASCII file.
Parameters
----------
asc_file : file_like
File-like object for an ESRI ASCII file.
Yields
------
str
Header line.
"""
pos = asc_file.tell()
line = asc_file.readline()
while len(line) > 0:
if len(line.strip()) > 0:
item = _parse_header_key_value(line)
if item:
yield item
else:
asc_file.seek(pos, 0)
break
pos = asc_file.tell()
line = asc_file.readline()
def _header_is_valid(header):
"""Check if the ESRI ASCII header is valid.
Parameters
----------
header : dict
Header as key-values pairs.
Raises
------
MissingRequiredKeyError
The header is missing a required key.
KeyTypeError
The header has the key but its values is of the wrong type.
"""
header_keys = set(header)
required_keys = set(["ncols", "nrows", "cellsize"])
if not required_keys.issubset(header_keys):
raise MissingRequiredKeyError(", ".join(required_keys - header_keys))
for keys in [("xllcenter", "xllcorner"), ("yllcenter", "yllcorner")]:
if len(set(keys) & header_keys) != 1:
raise MissingRequiredKeyError("|".join(keys))
for (key, requires) in _HEADER_VALUE_TESTS.items():
to_type, is_valid = requires
if key not in header:
continue
try:
header[key] = to_type(header[key])
except ValueError:
raise KeyTypeError(key, to_type)
if not is_valid(header[key]):
raise KeyValueError(key, "Bad value")
return True
def read_asc_header(asc_file):
"""Read header information from an ESRI ASCII raster file.
The header contains the following variables,
- *ncols*: Number of cell columns
- *nrows*: Number of cell rows
- *xllcenter* or *xllcorner*: X (column) coordinate of lower-left
coordinate of grid (by center or lower-left corner of the cell)
- *yllcenter*, *yllcorner*: Y (row) coordinate of lower-left
coordinate of grid (by center or lower-left corner of the cell)
- *cellsize*: Grid spacing between rows and columns
- *nodata_value*: No-data value (optional)
Parameters
----------
asc_file : file_like
File-like object from which to read header.
Returns
-------
dict
Header as key-value pairs.
Raises
------
MissingRequiredKeyError
The header is missing a required key.
KeyTypeError
The header has the key but its values is of the wrong type.
Examples
--------
>>> from io import StringIO
>>> from landlab.io.esri_ascii import read_asc_header
>>> contents = StringIO('''
... nrows 100
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> hdr = read_asc_header(contents)
>>> hdr['nrows'], hdr['ncols']
(100, 200)
>>> hdr['cellsize']
1.5
>>> hdr['xllcenter'], hdr['yllcenter']
(0.5, -0.5)
``MissingRequiredKey`` is raised if the header does not contain all of the
necessary keys.
>>> contents = StringIO('''
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> read_asc_header(contents) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
MissingRequiredKeyError: nrows
``KeyTypeError`` is raises if a value is of the wrong type. For instance,
``nrows`` and ``ncols`` must be ``int``.
>>> contents = StringIO('''
... nrows 100.5
... ncols 200
... cellsize 1.5
... xllcenter 0.5
... yllcenter -0.5
... ''')
>>> read_asc_header(contents) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyTypeError: Unable to convert nrows to <type 'int'>
"""
header = dict()
for (key, value) in _header_lines(asc_file):
header[key] = value
_header_is_valid(header)
return header
def _read_asc_data(asc_file):
"""Read gridded data from an ESRI ASCII data file.
Parameters
----------
asc_file : file-like
File-like object of the data file pointing to the start of the data.
.. note::
First row of the data is at the top of the raster grid, the second
row is the second from the top, and so on.
"""
return np.loadtxt(asc_file)
def read_esri_ascii(asc_file, grid=None, reshape=False, name=None, halo=0):
"""Read :py:class:`~landlab.RasterModelGrid` from an ESRI ASCII file.
Read data from *asc_file*, an ESRI_ ASCII file, into a
:py:class:`~landlab.RasterModelGrid`. *asc_file* is either the name of
the data file or is a file-like object.
The grid and data read from the file are returned as a tuple
(*grid*, *data*) where *grid* is an instance of
:py:class:`~landlab.RasterModelGrid` and *data* is a numpy
array of doubles with that has been reshaped to have the number of rows
and columns given in the header.
.. _ESRI: http://resources.esri.com/help/9.3/arcgisengine/java/GP_ToolRef/spatial_analyst_tools/esri_ascii_raster_format.htm
Parameters
----------
asc_file : str of file-like
Data file to read.
reshape : boolean, optional
Reshape the returned array, otherwise return a flattened array.
name : str, optional
Add data to the grid as a named field.
grid : *grid* , optional
Adds data to an existing *grid* instead of creating a new one.
halo : integer, optional
Adds outer border of depth halo to the *grid*.
Returns
-------
(grid, data) : tuple
A newly-created RasterModel grid and the associated node data.
Raises
------
DataSizeError
Data are not the same size as indicated by the header file.
MismatchGridDataSizeError
If a grid is passed, and the size of the grid does not agree with the
size of the data.
MismatchGridXYSpacing
If a grid is passed, and the cellsize listed in the heading does not
match the grid dx and dy.
MismatchGridXYLowerLeft
If a grid is passed and the xllcorner and yllcorner do not match that
of the grid.
Examples
--------
Assume that fop is the name of a file that contains text below
(make sure you have your path correct):
ncols 3
nrows 4
xllcorner 1.
yllcorner 2.
cellsize 10.
NODATA_value -9999
0. 1. 2.
3. 4. 5.
6. 7. 8.
9. 10. 11.
--------
>>> from landlab.io import read_esri_ascii
>>> (grid, data) = read_esri_ascii('fop') # doctest: +SKIP
>>> #grid is an object of type RasterModelGrid with 4 rows and 3 cols
>>> #data contains an array of length 4*3 that is equal to
>>> # [9., 10., 11., 6., 7., 8., 3., 4., 5., 0., 1., 2.]
>>> (grid, data) = read_esri_ascii('fop', halo=1) # doctest: +SKIP
>>> #now the data has a nodata_value ring of -9999 around it. So array is
>>> # [-9999, -9999, -9999, -9999, -9999, -9999,
>>> # -9999, 9., 10., 11., -9999,
>>> # -9999, 6., 7., 8., -9999,
>>> # -9999, 3., 4., 5., -9999,
>>> # -9999, 0., 1., 2. -9999,
>>> # -9999, -9999, -9999, -9999, -9999, -9999]
"""
from ..grid import RasterModelGrid
# if the asc_file is provided as a string, open it and pass the pointer to
# _read_asc_header, and _read_asc_data
if isinstance(asc_file, (str, pathlib.Path)):
with open(asc_file, "r") as f:
header = read_asc_header(f)
data = _read_asc_data(f)
# otherwise, pass asc_file directly.
else:
header = read_asc_header(asc_file)
data = _read_asc_data(asc_file)
# There is no reason for halo to be negative.
# Assume that if a negative value is given it should be 0.
if halo <= 0:
shape = (header["nrows"], header["ncols"])
if data.size != shape[0] * shape[1]:
raise DataSizeError(shape[0] * shape[1], data.size)
else:
shape = (header["nrows"] + 2 * halo, header["ncols"] + 2 * halo)
# check to see if a nodata_value was given. If not, assign -9999.
if "nodata_value" in header.keys():
nodata_value = header["nodata_value"]
else:
header["nodata_value"] = -9999.0
nodata_value = header["nodata_value"]
if data.size != (shape[0] - 2 * halo) * (shape[1] - 2 * halo):
raise DataSizeError(shape[0] * shape[1], data.size)
xy_spacing = (header["cellsize"], header["cellsize"])
xy_of_lower_left = (
header["xllcorner"] - halo * header["cellsize"],
header["yllcorner"] - halo * header["cellsize"],
)
data = np.flipud(data)
if halo > 0:
data = add_halo(
data.reshape(header["nrows"], header["ncols"]),
halo=halo,
halo_value=nodata_value,
).reshape((-1,))
if not reshape:
data = data.flatten()
if grid is not None:
if (grid.number_of_node_rows != shape[0]) or (
grid.number_of_node_columns != shape[1]
):
raise MismatchGridDataSizeError(
shape[0] * shape[1],
grid.number_of_node_rows * grid.number_of_node_columns,
)
if (grid.dx, grid.dy) != xy_spacing:
raise MismatchGridXYSpacing((grid.dx, grid.dy), xy_spacing)
if grid.xy_of_lower_left != xy_of_lower_left:
raise MismatchGridXYLowerLeft(grid.xy_of_lower_left, xy_of_lower_left)
if grid is None:
grid = RasterModelGrid(
shape, xy_spacing=xy_spacing, xy_of_lower_left=xy_of_lower_left
)
if name:
grid.add_field(name, data, at="node")
return (grid, data)
def write_esri_ascii(path, fields, names=None, clobber=False):
"""Write landlab fields to ESRI ASCII.
Write the data and grid information for *fields* to *path* in the ESRI
ASCII format.
Parameters
----------
path : str
Path to output file.
fields : field-like
Landlab field object that holds a grid and associated values.
names : iterable of str, optional
Names of the fields to include in the output file. If not provided,
write all fields.
clobber : boolean
If *path* exists, clobber the existing file, otherwise raise an
exception.
Examples
--------
>>> import numpy as np
>>> import os
>>> import tempfile
>>> from landlab import RasterModelGrid
>>> from landlab.io.esri_ascii import write_esri_ascii
>>> grid = RasterModelGrid((4, 5), xy_spacing=(2., 2.))
>>> _ = grid.add_field("air__temperature", np.arange(20.), at="node")
>>> with tempfile.TemporaryDirectory() as tmpdirname:
... fname = os.path.join(tmpdirname, 'test.asc')
... files = write_esri_ascii(fname, grid)
>>> for file in files:
... print(os.path.basename(file))
test.asc
>>> _ = grid.add_field("land_surface__elevation", np.arange(20.), at="node")
>>> with tempfile.TemporaryDirectory() as tmpdirname:
... fname = os.path.join(tmpdirname, 'test.asc')
... files = write_esri_ascii(fname, grid)
>>> files.sort()
>>> for file in files:
... print(os.path.basename(file))
test_air__temperature.asc
test_land_surface__elevation.asc
"""
if os.path.exists(path) and not clobber:
raise ValueError("file exists")
if isinstance(names, (str, pathlib.Path)):
names = [names]
names = names or fields.at_node.keys()
if len(names) == 1:
paths = [path]
elif len(names) > 1:
(base, ext) = os.path.splitext(path)
paths = [base + "_" + name + ext for name in names]
else:
raise ValueError("no node fields to write")
bad_names = set(names) - set(fields.at_node.keys())
if len(bad_names) > 0:
raise ValueError("unknown field name(s): %s" % ",".join(bad_names))
header = {
"ncols": fields.number_of_node_columns,
"nrows": fields.number_of_node_rows,
"xllcorner": fields.node_x[0],
"yllcorner": fields.node_y[0],
"cellsize": fields.dx,
}
for path, name in zip(paths, names):
header_lines = ["%s %s" % (key, str(val)) for key, val in list(header.items())]
data = fields.at_node[name].reshape(header["nrows"], header["ncols"])
np.savetxt(
path, np.flipud(data), header=os.linesep.join(header_lines), comments=""
)
return paths
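# Illustrative sketch (not part of the original module; the file and field
# names are hypothetical): a simple round trip using the two public
# functions above.
#
#     grid, data = read_esri_ascii("dem.asc", name="topographic__elevation")
#     write_esri_ascii("dem_copy.asc", grid, names="topographic__elevation",
#                      clobber=True)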
| mit |
swiftstack/swift | swift/obj/server.py | 1 | 67311 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six
import six.moves.cPickle as pickle
from six.moves.urllib.parse import unquote
import json
import os
import multiprocessing
import time
import traceback
import socket
import math
from swift import gettext_ as _
from eventlet import sleep, wsgi, Timeout, tpool
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes, safe_json_loads, \
config_auto_int_value, split_path, get_redirect_data, \
normalize_timestamp, md5
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8, AUTO_CREATE_ACCOUNT_PREFIX
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.common.request_helpers import \
OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX
from swift.obj import ssync_receiver
from swift.common.http import is_success, HTTP_MOVED_PERMANENTLY
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header, is_sys_meta, validate_internal_obj
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError, wsgi_to_bytes, wsgi_to_str, normalize_etag
from swift.obj.diskfile import RESERVED_DATAFILE_META, DiskFileRouter
from swift.obj.expirer import build_task_obj
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
mime_documents_iter = iter_multipart_mime_documents(
wsgi_input, mime_boundary, read_chunk_size)
for file_like in mime_documents_iter:
hdrs = parse_mime_headers(file_like)
yield (hdrs, file_like)
def drain(file_like, read_size, timeout):
"""
Read and discard any bytes from file_like.
:param file_like: file-like object to read from
:param read_size: how big a chunk to read at a time
:param timeout: how long to wait for a read (use None for no timeout)
:raises ChunkReadTimeout: if no chunk was read in time
"""
while True:
with ChunkReadTimeout(timeout):
chunk = file_like.read(read_size)
if not chunk:
break
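# Illustrative sketch (not part of the original module): draining a small
# in-memory stream; the 1-second timeout is an arbitrary example value.
#
#     import io
#     leftover = io.BytesIO(b"unread request body")
#     drain(leftover, read_size=65536, timeout=1)  # reads to EOF, discards all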
def get_obj_name_and_placement(request):
"""
Split and validate path for an object.
:param request: a swob request
:returns: a tuple of path parts and storage policy
"""
device, partition, account, container, obj, policy = \
get_name_and_placement(request, 5, 5, True)
validate_internal_obj(account, container, obj)
return device, partition, account, container, obj, policy
def _make_backend_fragments_header(fragments):
if fragments:
result = {}
for ts, frag_list in fragments.items():
result[ts.internal] = frag_list
return json.dumps(result)
return None
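# Illustrative note (not part of the original module): given a mapping such
# as {Timestamp("1234567890.12345"): [0, 2]}, the helper above returns
# roughly '{"1234567890.12345": [0, 2]}', keyed by the timestamp's internal
# form; an empty or missing mapping yields None.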
class EventletPlungerString(bytes):
"""
Eventlet won't send headers until it's accumulated at least
eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted.
If we want to send the response body behind Eventlet's back, perhaps
with some zero-copy wizardry, then we have to unclog the plumbing in
eventlet.wsgi to force the headers out, so we use an
EventletPlungerString to empty out all of Eventlet's buffers.
"""
def __len__(self):
return wsgi.MINIMUM_CHUNK_SIZE + 1
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
super(ObjectController, self).__init__(conf)
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = float(conf.get('node_timeout', 3))
self.container_update_timeout = float(
conf.get('container_update_timeout', 1))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = float(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
cache-control,
content-language,
expires,
x-robots-tag
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in RESERVED_DATAFILE_META:
self.allowed_headers.add(header)
if conf.get('auto_create_account_prefix'):
self.logger.warning('Option auto_create_account_prefix is '
'deprecated. Configure '
'auto_create_account_prefix under the '
'swift-constraints section of '
'swift.conf. This option will '
'be ignored in a future release.')
self.auto_create_account_prefix = \
conf['auto_create_account_prefix']
else:
self.auto_create_account_prefix = AUTO_CREATE_ACCOUNT_PREFIX
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
if six.PY2:
socket._fileobject.default_bufsize = self.network_chunk_size
# TODO: find a way to enable similar functionality in py3
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
servers_per_port = int(conf.get('servers_per_port', '0') or 0)
if servers_per_port:
# The typical servers-per-port deployment also uses one port per
# disk, so you really get N servers per disk. In that case,
# having a pool of 20 threads per server per disk is far too
# much. For example, given a 60-disk chassis and 4 servers per
# disk, the default configuration will give us 21 threads per
# server (the main thread plus the twenty tpool threads), for a
# total of around 60 * 21 * 4 = 5040 threads. This is clearly
# too high.
#
# Instead, we use a tpool size of 1, giving us 2 threads per
# process. In the example above, that's 60 * 2 * 4 = 480
# threads, which is reasonable since there are 240 processes.
default_tpool_size = 1
else:
# If we're not using servers-per-port, then leave the tpool size
# alone. The default (20) is typically good enough for one
# object server handling requests for many disks.
default_tpool_size = None
tpool_size = config_auto_int_value(
conf.get('eventlet_tpool_num_threads'),
default_tpool_size)
if tpool_size:
tpool.set_num_threads(tpool_size)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy,
logger_thread_locals=None, container_path=None):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy: the associated BaseStoragePolicy instance
:param logger_thread_locals: The thread local values to be set on the
self.logger to retain transaction
logging information.
:param container_path: optional path in the form `<account/container>`
to which the update should be sent. If given this path will be used
instead of constructing a path from the ``account`` and
``container`` params.
"""
if logger_thread_locals:
self.logger.thread_locals = logger_thread_locals
headers_out['user-agent'] = 'object-server %s' % os.getpid()
if container_path:
# use explicitly specified container path
full_path = '/%s/%s' % (container_path, obj)
else:
full_path = '/%s/%s/%s' % (account, container, obj)
redirect_data = None
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
if response.status == HTTP_MOVED_PERMANENTLY:
try:
redirect_data = get_redirect_data(response)
except ValueError as err:
self.logger.error(
'Container update failed for %r; problem with '
'redirect location: %s' % (obj, err))
else:
self.logger.error(_(
'ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
if redirect_data:
self.logger.debug(
'Update to %(path)s redirected to %(redirect)s',
{'path': full_path, 'redirect': redirect_data[0]})
container_path = redirect_data[0]
if container_path:
data['container_path'] = container_path
timestamp = headers_out.get('x-meta-timestamp',
headers_out.get('x-timestamp'))
self._diskfile_router[policy].pickle_async_update(
objdevice, account, container, obj, data, timestamp, policy)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice, policy):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance
"""
headers_in = request.headers
conthosts = [h.strip() for h in
headers_in.get('X-Container-Host', '').split(',')]
contdevices = [d.strip() for d in
headers_in.get('X-Container-Device', '').split(',')]
contpartition = headers_in.get('X-Container-Partition', '')
if len(conthosts) != len(contdevices):
# This shouldn't happen unless there's a bug in the proxy,
# but if there is, we want to know about it.
self.logger.error(_(
'ERROR Container update failed: different '
'numbers of hosts and devices in request: '
'"%(hosts)s" vs "%(devices)s"') % {
'hosts': headers_in.get('X-Container-Host', ''),
'devices': headers_in.get('X-Container-Device', '')})
return
contpath = headers_in.get('X-Backend-Quoted-Container-Path')
if contpath:
contpath = unquote(contpath)
else:
contpath = headers_in.get('X-Backend-Container-Path')
if contpath:
try:
# TODO: this is very late in request handling to be validating
# a header - if we did *not* check and the header was bad
# presumably the update would fail and we would fall back to an
# async update to the root container, which might be best
# course of action rather than aborting update altogether?
split_path('/' + contpath, minsegs=2, maxsegs=2)
except ValueError:
self.logger.error(
"Invalid X-Backend-Container-Path, should be of the form "
"'account/container' but got %r." % contpath)
# fall back to updating root container
contpath = None
if contpartition:
# In py3, zip() continues to work for our purposes... But when
# we want to log an error, consumed items are no longer present
# in the zip, making the logs useless for operators. So, list().
updates = list(zip(conthosts, contdevices))
else:
updates = []
headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
headers_out['referer'] = request.as_referer()
headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
update_greenthreads = []
for conthost, contdevice in updates:
gt = spawn(self.async_update, op, account, container, obj,
conthost, contpartition, contdevice, headers_out,
objdevice, policy,
logger_thread_locals=self.logger.thread_locals,
container_path=contpath)
update_greenthreads.append(gt)
# Wait a little bit to see if the container updates are successful.
# If we immediately return after firing off the greenthread above, then
# we're more likely to confuse the end-user who does a listing right
# after getting a successful response to the object create. The
# `container_update_timeout` bounds the length of time we wait so that
# one slow container server doesn't make the entire request lag.
try:
with Timeout(self.container_update_timeout):
for gt in update_greenthreads:
gt.wait()
except Timeout:
# updates didn't go through, log it and return
self.logger.debug(
'Container update timeout (%.4fs) waiting for %s',
self.container_update_timeout, updates)
def delete_at_update(self, op, delete_at, account, container, obj,
request, objdevice, policy):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param delete_at: scheduled delete in UNIX seconds, int
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request driving the update
:param objdevice: device name that the object is in
:param policy: the BaseStoragePolicy instance (used for tmp dir)
"""
if config_true_value(
request.headers.get('x-backend-replication', 'f')):
return
delete_at = normalize_delete_at_timestamp(delete_at)
updates = [(None, None)]
partition = None
hosts = contdevices = [None]
headers_in = request.headers
headers_out = HeaderKeyDict({
# system accounts are always Policy-0
'X-Backend-Storage-Policy-Index': 0,
'x-timestamp': request.timestamp.internal,
'x-trans-id': headers_in.get('x-trans-id', '-'),
'referer': request.as_referer()})
if op != 'DELETE':
hosts = headers_in.get('X-Delete-At-Host', None)
if hosts is None:
# If header is missing, no update needed as sufficient other
# object servers should perform the required update.
return
delete_at_container = headers_in.get('X-Delete-At-Container', None)
if not delete_at_container:
# older proxy servers did not send X-Delete-At-Container so for
# backwards compatibility calculate the value here, but also
# log a warning because this is prone to inconsistent
# expiring_objects_container_divisor configurations.
# See https://bugs.launchpad.net/swift/+bug/1187200
self.logger.warning(
'X-Delete-At-Container header must be specified for '
'expiring objects background %s to work properly. Making '
'best guess as to the container name for now.' % op)
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
partition = headers_in.get('X-Delete-At-Partition', None)
contdevices = headers_in.get('X-Delete-At-Device', '')
updates = [upd for upd in
zip((h.strip() for h in hosts.split(',')),
(c.strip() for c in contdevices.split(',')))
if all(upd) and partition]
if not updates:
updates = [(None, None)]
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
else:
if not config_true_value(
request.headers.get(
'X-Backend-Clean-Expiring-Object-Queue', 't')):
return
# DELETEs of old expiration data have no way of knowing what the
# old X-Delete-At-Container was at the time of the initial setting
# of the data, so a best guess is made here.
# Worst case is a DELETE is issued now for something that doesn't
# exist there and the original data is left where it is, where
# it will be ignored when the expirer eventually tries to issue the
# object DELETE later since the X-Delete-At value won't match up.
delete_at_container = get_expirer_container(
delete_at, self.expiring_objects_container_divisor,
account, container, obj)
delete_at_container = normalize_delete_at_timestamp(
delete_at_container)
for host, contdevice in updates:
self.async_update(
op, self.expiring_objects_account, delete_at_container,
build_task_obj(delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice,
policy)
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
try:
return file_like.read(self.network_chunk_size)
except (IOError, ValueError):
raise ChunkReadError
return timeout_reader
def _read_put_commit_message(self, mime_documents_iter):
rcvd_commit = False
try:
with ChunkReadTimeout(self.client_timeout):
commit_hdrs, commit_iter = next(mime_documents_iter)
if commit_hdrs.get('X-Document', None) == "put commit":
rcvd_commit = True
drain(commit_iter, self.network_chunk_size, self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
return rcvd_commit
def _read_metadata_footer(self, mime_documents_iter):
try:
with ChunkReadTimeout(self.client_timeout):
footer_hdrs, footer_iter = next(mime_documents_iter)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
raise HTTPBadRequest(body="couldn't find footer MIME doc")
return self._parse_footer(footer_hdrs, footer_iter)
def _parse_footer(self, footer_hdrs, footer_iter):
"""
Validate footer metadata and translate JSON body into HeaderKeyDict.
"""
timeout_reader = self._make_timeout_reader(footer_iter)
try:
footer_body = b''.join(iter(timeout_reader, b''))
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
footer_md5 = footer_hdrs.get('Content-MD5')
if not footer_md5:
raise HTTPBadRequest(body="no Content-MD5 in footer")
if footer_md5 != md5(footer_body, usedforsecurity=False).hexdigest():
raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
try:
return HeaderKeyDict(json.loads(footer_body))
except ValueError:
raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
:param metadata: a dict that may contain override items
:param footers: another dict that may contain override items, at a
higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
OBJECT_SYSMETA_CONTAINER_UPDATE_OVERRIDE_PREFIX]
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
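# Illustrative note (header names are examples only): with the precedence
# above, a metadata key 'X-Object-Sysmeta-Container-Update-Override-Etag'
# (or the older 'X-Backend-Container-Update-Override-Etag') ends up in the
# container update as 'x-etag', with footer-supplied values taking priority
# over request metadata for the same prefix.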
@public
@timing_stats()
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
req_timestamp = valid_timestamp(request)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < req_timestamp:
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')),
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined):
return HTTPNotFound(request=request)
orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
orig_ctype_timestamp = disk_file.content_type_timestamp
req_ctype_time = '0'
req_ctype = request.headers.get('Content-Type')
if req_ctype:
req_ctype_time = request.headers.get('Content-Type-Timestamp',
req_timestamp.internal)
req_ctype_timestamp = Timestamp(req_ctype_time)
if orig_timestamp >= req_timestamp \
and orig_ctype_timestamp >= req_ctype_timestamp:
return HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
if req_timestamp > orig_timestamp:
metadata = {'X-Timestamp': req_timestamp.internal}
metadata.update(val for val in request.headers.items()
if (is_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
else:
# preserve existing metadata, only content-type may be updated
metadata = dict(disk_file.get_metafile_metadata())
if req_ctype_timestamp > orig_ctype_timestamp:
# we have a new content-type, add to metadata and container update
content_type_headers = {
'Content-Type': request.headers['Content-Type'],
'Content-Type-Timestamp': req_ctype_timestamp.internal
}
metadata.update(content_type_headers)
else:
# send existing content-type with container update
content_type_headers = {
'Content-Type': disk_file.content_type,
'Content-Type-Timestamp': orig_ctype_timestamp.internal
}
if orig_ctype_timestamp != disk_file.data_timestamp:
# only add to metadata if it's not the datafile content-type
metadata.update(content_type_headers)
try:
disk_file.write_metadata(metadata)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
if (content_type_headers['Content-Type-Timestamp']
!= disk_file.data_timestamp):
# Current content-type is not from the datafile, but the datafile
# content-type may have a swift_bytes param that was appended by
# SLO and we must continue to send that with the container update.
# Do this (rather than use a separate header) for backwards
# compatibility because there may be 'legacy' container updates in
# async pending that have content-types with swift_bytes params, so
# we have to be able to handle those in container server anyway.
_, swift_bytes = extract_swift_bytes(
disk_file.get_datafile_metadata()['Content-Type'])
if swift_bytes:
content_type_headers['Content-Type'] += (';swift_bytes=%s'
% swift_bytes)
update_headers = HeaderKeyDict({
'x-size': orig_metadata['Content-Length'],
'x-content-type': content_type_headers['Content-Type'],
'x-timestamp': disk_file.data_timestamp.internal,
'x-content-type-timestamp':
content_type_headers['Content-Type-Timestamp'],
'x-meta-timestamp': metadata['X-Timestamp'],
'x-etag': orig_metadata['ETag']})
# Special cases for backwards compatibility.
# For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
# X-Backend-Container-Update-Override-Etag value sent with the original
# PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
# same as the X-Backend-Container-Update-Override-Size value. We have
# to send Etag and size with a POST container update because the
# original PUT container update may have failed or be in async_pending.
if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
update_headers['X-Etag'] = orig_metadata[
'X-Object-Sysmeta-Ec-Etag']
if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
update_headers['X-Size'] = orig_metadata[
'X-Object-Sysmeta-Ec-Content-Length']
self._check_container_override(update_headers, orig_metadata)
# object POST updates are PUT to the container server
self.container_update(
'PUT', account, container, obj, request, update_headers,
device, policy)
# Add current content-type and sysmeta to response
resp_headers = {
'X-Backend-Content-Type': content_type_headers['Content-Type']}
for key, value in orig_metadata.items():
if is_sys_meta('object', key):
resp_headers[key] = value
return HTTPAccepted(request=request, headers=resp_headers)
def _pre_create_checks(self, request, device, partition,
account, container, obj, policy):
req_timestamp = valid_timestamp(request)
error_response = check_object_creation(request, obj)
if error_response:
raise error_response
try:
fsize = request.message_length()
except ValueError as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# In case of multipart-MIME put, the proxy sends a chunked request,
# but may let us know the real content length so we can verify that
# we have enough disk space to hold the object.
if fsize is None:
fsize = request.headers.get('X-Backend-Obj-Content-Length')
if fsize is not None:
try:
fsize = int(fsize)
except ValueError as e:
raise HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
# SSYNC will include Frag-Index header for subrequests, in which case
# get_diskfile will ignore non-matching on-disk data files
frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_index=frag_index,
next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
raise HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
orig_timestamp = disk_file.data_timestamp
except DiskFileXattrNotSupported:
raise HTTPInsufficientStorage(drive=device, request=request)
except DiskFileDeleted as e:
orig_metadata = {}
orig_timestamp = e.timestamp
except (DiskFileNotExist, DiskFileQuarantined):
orig_metadata = {}
orig_timestamp = Timestamp(0)
# Checks for If-None-Match
if request.if_none_match is not None and orig_metadata:
if '*' in request.if_none_match:
# File exists already so return 412
raise HTTPPreconditionFailed(request=request)
if orig_metadata.get('ETag') in request.if_none_match:
# The current ETag matches, so raise 412
raise HTTPPreconditionFailed(request=request)
if orig_timestamp >= req_timestamp:
raise HTTPConflict(
request=request,
headers={'X-Backend-Timestamp': orig_timestamp.internal})
return disk_file, fsize, orig_metadata
def _do_multi_stage_mime_continue_headers(self, request, obj_input):
"""
If the proxy wants to send us object metadata after the object body, it
sets some headers. We have to tell the proxy, in the 100 Continue
response, that we're able to parse a multipart MIME document and
extract the object and metadata from it. If we don't, then the proxy
won't actually send the footer metadata.
If the proxy doesn't want to do any of that, this is the identity
function for obj_input and multi_stage_mime_state will be False-y.
:returns: a tuple, (obj_input, multi_stage_mime_state)
"""
have_metadata_footer = False
use_multiphase_commit = False
hundred_continue_headers = []
if config_true_value(
request.headers.get(
'X-Backend-Obj-Multiphase-Commit')):
use_multiphase_commit = True
hundred_continue_headers.append(
('X-Obj-Multiphase-Commit', 'yes'))
if config_true_value(
request.headers.get('X-Backend-Obj-Metadata-Footer')):
have_metadata_footer = True
hundred_continue_headers.append(
('X-Obj-Metadata-Footer', 'yes'))
if have_metadata_footer or use_multiphase_commit:
obj_input.set_hundred_continue_response_headers(
hundred_continue_headers)
mime_boundary = wsgi_to_bytes(request.headers.get(
'X-Backend-Obj-Multipart-Mime-Boundary'))
if not mime_boundary:
raise HTTPBadRequest("no MIME boundary")
with ChunkReadTimeout(self.client_timeout):
mime_documents_iter = iter_mime_headers_and_bodies(
request.environ['wsgi.input'],
mime_boundary, self.network_chunk_size)
_junk_hdrs, obj_input = next(mime_documents_iter)
multi_stage_mime_state = {
'have_metadata_footer': have_metadata_footer,
'use_multiphase_commit': use_multiphase_commit,
'mime_documents_iter': mime_documents_iter,
}
else:
multi_stage_mime_state = {}
return obj_input, multi_stage_mime_state
def _stage_obj_data(self, request, device, obj_input, writer, fsize):
"""
Feed the object_input into the writer.
:returns: a tuple, (upload_size, etag)
"""
writer.open()
elapsed_time = 0
upload_expiration = time.time() + self.max_upload_time
timeout_reader = self._make_timeout_reader(obj_input)
for chunk in iter(timeout_reader, b''):
start_time = time.time()
if start_time > upload_expiration:
self.logger.increment('PUT.timeouts')
raise HTTPRequestTimeout(request=request)
writer.write(chunk)
elapsed_time += time.time() - start_time
upload_size, etag = writer.chunks_finished()
if fsize is not None and fsize != upload_size:
raise HTTPClientDisconnect(request=request)
if upload_size:
self.logger.transfer_rate(
'PUT.' + device + '.timing', elapsed_time,
upload_size)
return upload_size, etag
def _get_request_metadata(self, request, upload_size, etag):
"""
Pull object metadata off the request.
:returns: metadata, a dict of object metadata
"""
metadata = {
'X-Timestamp': request.timestamp.internal,
'Content-Type': request.headers['content-type'],
'Content-Length': str(upload_size),
'ETag': etag,
}
metadata.update(val for val in request.headers.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
headers_to_copy = (
request.headers.get(
'X-Backend-Replication-Headers', '').split() +
list(self.allowed_headers))
for header_key in headers_to_copy:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
return metadata
def _read_mime_footers_metadata(self, have_metadata_footer,
mime_documents_iter, **kwargs):
"""
Read footer metadata from the bottom of the multi-stage MIME body.
:returns: metadata, a dict
"""
if have_metadata_footer:
metadata = self._read_metadata_footer(
mime_documents_iter)
footer_etag = metadata.pop('etag', '').lower()
if footer_etag:
metadata['ETag'] = footer_etag
else:
metadata = {}
return metadata
def _apply_extra_metadata(self, request, metadata, footers_metadata):
"""
Apply extra metadata precedence to prepare metadata for storage.
"""
metadata.update(val for val in footers_metadata.items()
if (is_sys_or_user_meta('object', val[0]) or
is_object_transient_sysmeta(val[0])))
# N.B. footers_metadata is a HeaderKeyDict
received_etag = normalize_etag(footers_metadata.get(
'etag', request.headers.get('etag', '')))
if received_etag and received_etag != metadata['ETag']:
raise HTTPUnprocessableEntity(request=request)
def _send_multi_stage_continue_headers(self, request,
use_multiphase_commit,
mime_documents_iter, **kwargs):
"""
If the PUT requires a two-phase commit (a data and a commit phase) send
the proxy server another 100-continue response to indicate that we are
finished writing object data
"""
if use_multiphase_commit:
request.environ['wsgi.input'].\
send_hundred_continue_response()
if not self._read_put_commit_message(mime_documents_iter):
raise HTTPServerError(request=request)
def _drain_mime_request(self, mime_documents_iter, **kwargs):
"""
Drain any remaining MIME docs from the socket. There shouldn't be any,
but we must read the whole request body.
"""
try:
while True:
with ChunkReadTimeout(self.client_timeout):
_junk_hdrs, _junk_body = next(mime_documents_iter)
drain(_junk_body, self.network_chunk_size,
self.client_timeout)
except ChunkReadError:
raise HTTPClientDisconnect()
except ChunkReadTimeout:
raise HTTPRequestTimeout()
except StopIteration:
pass
def _post_commit_updates(self, request, device,
account, container, obj, policy,
orig_metadata, footers_metadata, metadata):
orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if orig_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update(
'PUT', new_delete_at, account, container, obj, request,
device, policy)
if orig_delete_at:
self.delete_at_update(
'DELETE', orig_delete_at, account, container, obj,
request, device, policy)
update_headers = HeaderKeyDict({
'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']})
# apply any container update header overrides sent with request
self._check_container_override(update_headers, request.headers,
footers_metadata)
self.container_update(
'PUT', account, container, obj, request,
update_headers, device, policy)
@public
@timing_stats()
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
disk_file, fsize, orig_metadata = self._pre_create_checks(
request, device, partition, account, container, obj, policy)
writer = disk_file.writer(size=fsize)
try:
obj_input = request.environ['wsgi.input']
obj_input, multi_stage_mime_state = \
self._do_multi_stage_mime_continue_headers(request, obj_input)
upload_size, etag = self._stage_obj_data(
request, device, obj_input, writer, fsize)
metadata = self._get_request_metadata(request, upload_size, etag)
if multi_stage_mime_state:
footers_metadata = self._read_mime_footers_metadata(
**multi_stage_mime_state)
else:
footers_metadata = {}
self._apply_extra_metadata(request, metadata, footers_metadata)
writer.put(metadata)
if multi_stage_mime_state:
self._send_multi_stage_continue_headers(
request, **multi_stage_mime_state)
writer.commit(request.timestamp)
if multi_stage_mime_state:
self._drain_mime_request(**multi_stage_mime_state)
except (DiskFileXattrNotSupported, DiskFileNoSpace):
return HTTPInsufficientStorage(drive=device, request=request)
except ChunkReadError:
return HTTPClientDisconnect(request=request)
except ChunkReadTimeout:
return HTTPRequestTimeout(request=request)
finally:
writer.close()
self._post_commit_updates(request, device,
account, container, obj, policy,
orig_metadata, footers_metadata, metadata)
return HTTPCreated(request=request, etag=etag)
@public
@timing_stats()
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
request.headers.setdefault('X-Timestamp',
normalize_timestamp(time.time()))
req_timestamp = valid_timestamp(request)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs,
open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')))
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
with disk_file.open(current_time=req_timestamp):
metadata = disk_file.get_metadata()
ignore_range_headers = set(
h.strip().lower()
for h in request.headers.get(
'X-Backend-Ignore-Range-If-Metadata-Present',
'').split(','))
if ignore_range_headers.intersection(
h.lower() for h in metadata):
request.headers.pop('Range', None)
obj_size = int(metadata['Content-Length'])
file_x_ts = Timestamp(metadata['X-Timestamp'])
keep_cache = (self.keep_cache_private or
('X-Auth-Token' not in request.headers and
'X-Storage-Token' not in request.headers))
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(
app_iter=disk_file.reader(keep_cache=keep_cache),
request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
response.last_modified = math.ceil(float(file_x_ts))
response.content_length = obj_size
try:
response.content_encoding = metadata[
'Content-Encoding']
except KeyError:
pass
response.headers['X-Timestamp'] = file_x_ts.normal
response.headers['X-Backend-Timestamp'] = file_x_ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
resp = request.get_response(response)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
resp = HTTPNotFound(request=request, headers=headers,
conditional_response=True)
return resp
@public
@timing_stats(sample_rate=0.8)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
request.headers.setdefault('X-Timestamp',
normalize_timestamp(time.time()))
req_timestamp = valid_timestamp(request)
frag_prefs = safe_json_loads(
request.headers.get('X-Backend-Fragment-Preferences'))
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, frag_prefs=frag_prefs,
open_expired=config_true_value(
request.headers.get('x-backend-replication', 'false')))
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except (DiskFileNotExist, DiskFileQuarantined) as e:
headers = {}
if hasattr(e, 'timestamp'):
headers['X-Backend-Timestamp'] = e.timestamp.internal
return HTTPNotFound(request=request, headers=headers,
conditional_response=True)
conditional_etag = resolve_etag_is_at_header(request, metadata)
response = Response(request=request, conditional_response=True,
conditional_etag=conditional_etag)
response.headers['Content-Type'] = metadata.get(
'Content-Type', 'application/octet-stream')
for key, value in metadata.items():
if (is_sys_or_user_meta('object', key) or
is_object_transient_sysmeta(key) or
key.lower() in self.allowed_headers):
response.headers[key] = value
response.etag = metadata['ETag']
ts = Timestamp(metadata['X-Timestamp'])
response.last_modified = math.ceil(float(ts))
# Needed for container sync feature
response.headers['X-Timestamp'] = ts.normal
response.headers['X-Backend-Timestamp'] = ts.internal
response.headers['X-Backend-Data-Timestamp'] = \
disk_file.data_timestamp.internal
if disk_file.durable_timestamp:
response.headers['X-Backend-Durable-Timestamp'] = \
disk_file.durable_timestamp.internal
response.headers['X-Backend-Fragments'] = \
_make_backend_fragments_header(disk_file.fragments)
response.content_length = int(metadata['Content-Length'])
try:
response.content_encoding = metadata['Content-Encoding']
except KeyError:
pass
return response
@public
@timing_stats()
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
device, partition, account, container, obj, policy = \
get_obj_name_and_placement(request)
req_timestamp = valid_timestamp(request)
next_part_power = request.headers.get('X-Backend-Next-Part-Power')
try:
disk_file = self.get_diskfile(
device, partition, account, container, obj,
policy=policy, next_part_power=next_part_power)
except DiskFileDeviceUnavailable:
return HTTPInsufficientStorage(drive=device, request=request)
try:
orig_metadata = disk_file.read_metadata(current_time=req_timestamp)
except DiskFileXattrNotSupported:
return HTTPInsufficientStorage(drive=device, request=request)
except DiskFileExpired as e:
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound
except (DiskFileNotExist, DiskFileQuarantined):
orig_timestamp = 0
orig_metadata = {}
response_class = HTTPNotFound
else:
orig_timestamp = disk_file.data_timestamp
if orig_timestamp < req_timestamp:
response_class = HTTPNoContent
else:
response_class = HTTPConflict
response_timestamp = max(orig_timestamp, req_timestamp)
orig_delete_at = Timestamp(orig_metadata.get('X-Delete-At') or 0)
try:
req_if_delete_at_val = request.headers['x-if-delete-at']
req_if_delete_at = Timestamp(req_if_delete_at_val)
except KeyError:
pass
except ValueError:
return HTTPBadRequest(
request=request,
body='Bad X-If-Delete-At header value')
else:
# request includes x-if-delete-at; we must not place a tombstone
# if we can not verify the x-if-delete-at time
if not orig_timestamp:
# no object found at all
return HTTPNotFound()
if orig_timestamp >= req_timestamp:
# Found a newer object -- return 409 as work item is stale
return HTTPConflict()
if orig_delete_at != req_if_delete_at:
return HTTPPreconditionFailed(
request=request,
body='X-If-Delete-At and X-Delete-At do not match')
else:
# differentiate success from no object at all
response_class = HTTPNoContent
if orig_delete_at:
self.delete_at_update('DELETE', orig_delete_at, account,
container, obj, request, device,
policy)
if orig_timestamp < req_timestamp:
try:
disk_file.delete(req_timestamp)
except DiskFileNoSpace:
return HTTPInsufficientStorage(drive=device, request=request)
self.container_update(
'DELETE', account, container, obj, request,
HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
device, policy)
return response_class(
request=request,
headers={'X-Backend-Timestamp': response_timestamp.internal,
'X-Backend-Content-Type': orig_metadata.get(
'Content-Type', '')})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy,
skip_rehash=bool(suffixes))
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
# force pickle protocol for compatibility with py2 nodes
resp = Response(body=pickle.dumps(hashes, protocol=2))
return resp
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
return Response(app_iter=ssync_receiver.Receiver(self, request)())
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(wsgi_to_str(req.path_info), internal=True):
res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
else:
try:
# disallow methods which have not been marked 'public'
if req.method not in self.allowed_methods:
res = HTTPMethodNotAllowed()
else:
res = getattr(self, req.method)(req)
except DiskFileCollision:
res = HTTPForbidden(request=req)
except HTTPException as error_response:
res = error_response
except (Exception, Timeout):
self.logger.exception(_(
'ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
res.fix_conditional_response()
if self.log_requests:
log_line = get_log_line(req, res, trans_time, '', self.log_format,
self.anonymization_method,
self.anonymization_salt)
if req.method in ('REPLICATE', 'SSYNC') or \
'X-Backend-Replication' in req.headers:
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
# To be able to zero-copy send the object, we need a few things.
# First, we have to be responding successfully to a GET, or else we're
# not sending the object. Second, we have to be able to extract the
# socket file descriptor from the WSGI input object. Third, the
# diskfile has to support zero-copy send.
#
# There's a good chance that this could work for 206 responses too,
# but the common case is sending the whole object, so we'll start
# there.
if req.method == 'GET' and res.status_int == 200 and \
isinstance(env['wsgi.input'], wsgi.Input):
app_iter = getattr(res, 'app_iter', None)
checker = getattr(app_iter, 'can_zero_copy_send', None)
if checker and checker():
# For any kind of zero-copy thing like sendfile or splice, we
# need the file descriptor. Eventlet doesn't provide a clean
# way of getting that, so we resort to this.
wsock = env['wsgi.input'].get_socket()
wsockfd = wsock.fileno()
# Don't call zero_copy_send() until after we force the HTTP
# headers out of Eventlet and into the socket.
def zero_copy_iter():
# If possible, set TCP_CORK so that headers don't
# immediately go on the wire, but instead, wait for some
# response body to make the TCP frames as large as
# possible (and hence as few packets as possible).
#
# On non-Linux systems, we might consider TCP_NODELAY, but
# since the only known zero-copy-capable diskfile uses
# Linux-specific syscalls, we'll defer that work until
# someone needs it.
if hasattr(socket, 'TCP_CORK'):
wsock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_CORK, 1)
yield EventletPlungerString()
try:
app_iter.zero_copy_send(wsockfd)
except Exception:
self.logger.exception("zero_copy_send() blew up")
raise
yield b''
# Get headers ready to go out
res(env, start_response)
return zero_copy_iter()
else:
return res(env, start_response)
else:
return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
"""
Callback for swift.common.wsgi.run_wsgi during the global_conf
creation so that we can add our replication_semaphore, used to
limit the number of concurrent SSYNC_REQUESTS across all
workers.
:param preloaded_app_conf: The preloaded conf for the WSGI app.
This conf instance will go away, so
just read from it, don't write.
:param global_conf: The global conf that will eventually be
passed to the app_factory function later.
This conf is created before the worker
subprocesses are forked, so can be useful to
set up semaphores, shared memory, etc.
"""
replication_concurrency = int(
preloaded_app_conf.get('replication_concurrency') or 4)
if replication_concurrency:
# Have to put the value in a list so it can get past paste
global_conf['replication_semaphore'] = [
multiprocessing.BoundedSemaphore(replication_concurrency)]
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
| apache-2.0 |
Zarokka/exaile | plugins/lastfmlove/__init__.py | 4 | 10746 | # Copyright (C) 2011 Mathias Brodala <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk
import logging
import os.path
from threading import (
Thread,
Timer
)
import pylast
from xl import (
common,
event,
player,
providers,
settings
)
from xl.nls import gettext as _
from xlgui import icons
from xlgui.widgets.menu import MenuItem
from xlgui.widgets.playlist_columns import (
Column,
ColumnMenuItem
)
import lastfmlove_preferences
from cellrenderertoggleimage import CellRendererToggleImage
LASTFMLOVER = None
logger = logging.getLogger(__name__)
basedir = os.path.dirname(os.path.realpath(__file__))
icons.MANAGER.add_icon_name_from_directory('love',
os.path.join(basedir, 'icons'))
icons.MANAGER.add_icon_name_from_directory('send-receive',
os.path.join(basedir, 'icons'))
def enable(exaile):
"""
Handles the deferred enable call
"""
global LASTFMLOVER
LASTFMLOVER = LastFMLover()
def disable(exaile):
"""
Disables the Last.fm Loved plugin
"""
global LASTFMLOVER
LASTFMLOVER.destroy()
def get_preferences_pane():
return lastfmlove_preferences
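# Note (illustration, not original code): enable(), disable() and
# get_preferences_pane() are the module-level hooks Exaile's plugin manager
# looks for; a rough lifecycle is:
#
#     enable(exaile)           # instantiates LastFMLover and registers the UI
#     get_preferences_pane()   # returns the preferences module for the dialog
#     disable(exaile)          # calls LASTFMLOVER.destroy() to unregister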
class LoveColumn(Column):
name = 'loved'
display = _('Loved')
menu_title = _('Last.fm Loved')
size = 50
renderer = CellRendererToggleImage
datatype = bool
dataproperty = 'active'
def __init__(self, *args):
Column.__init__(self, *args)
self.model = None
pixbuf = icons.MANAGER.pixbuf_from_icon_name('love', self.get_icon_height())
self.cellrenderer.props.pixbuf = pixbuf
self.cellrenderer.connect('toggled', self.on_toggled)
def data_func(self, column, cellrenderer, model, iter):
"""
Displays the loved state
"""
global LASTFMLOVER
track = model.get_value(iter, 0)
lastfm_track = pylast.Track(
track.get_tag_display('artist'),
track.get_tag_display('title'),
LASTFMLOVER.network
)
cellrenderer.props.active = lastfm_track in LASTFMLOVER.loved_tracks
if LASTFMLOVER.network is None:
cellrenderer.props.sensitive = False
cellrenderer.props.render_prelit = False
else:
cellrenderer.props.sensitive = True
cellrenderer.props.render_prelit = True
self.model = model
def on_toggled(self, cellrenderer, path):
"""
Loves or unloves the selected track
"""
global LASTFMLOVER
if cellrenderer.props.sensitive and LASTFMLOVER.network is not None:
track = self.model.get_value(self.model.get_iter(path), 0)
LASTFMLOVER.toggle_loved(track)
class LoveMenuItem(MenuItem):
"""
A menu item representing the loved state of a
track and allowing for loving and unloving it
"""
def __init__(self, after, get_tracks_function=None):
MenuItem.__init__(self, 'loved', None, after)
self.get_tracks_function = get_tracks_function
def factory(self, menu, parent, context):
"""
Sets up the menu item
"""
global LASTFMLOVER
item = Gtk.ImageMenuItem.new_with_mnemonic(_('_Love This Track'))
item.set_image(Gtk.Image.new_from_icon_name(
'love', Gtk.IconSize.MENU))
if self.get_tracks_function is not None:
tracks = self.get_tracks_function()
empty = len(tracks) == 0
else:
empty = context.get('selection-empty', True)
if not empty:
tracks = context.get('selected-tracks', [])
if not empty and LASTFMLOVER.network is not None:
# We only care about the first track
track = tracks[0]
lastfm_track = pylast.Track(
track.get_tag_display('artist'),
track.get_tag_display('title'),
LASTFMLOVER.network
)
if lastfm_track in LASTFMLOVER.loved_tracks:
item.set_label(_('Unlove This Track'))
item.connect('activate', self.on_activate, track)
else:
item.set_sensitive(False)
return item
def on_activate(self, menuitem, track):
"""
Loves or unloves the selected track
"""
global LASTFMLOVER
LASTFMLOVER.toggle_loved(track)
class LastFMLover(object):
"""
Allows for retrieval and setting
of loved tracks via Last.fm
"""
def __init__(self):
"""
Sets up the connection to Last.fm
as well as the graphical interface
"""
self.network = None
self.user = None
self.loved_tracks = []
self.timer = None
self.column_menu_item = ColumnMenuItem(column=LoveColumn, after=['__rating'])
self.menu_item = LoveMenuItem(after=['rating'])
def get_tracks_function():
"""
Drop-in replacement for the menu item context
to retrieve the currently playing track
"""
current_track = player.PLAYER.current
if current_track is not None:
return [current_track]
return []
self.tray_menu_item = LoveMenuItem(
after=['rating'],
get_tracks_function=get_tracks_function
)
self.setup_network()
providers.register('playlist-columns', LoveColumn)
providers.register('playlist-columns-menu', self.column_menu_item)
providers.register('playlist-context-menu', self.menu_item)
providers.register('tray-icon-context', self.tray_menu_item)
event.add_ui_callback(self.on_option_set, 'plugin_lastfmlove_option_set')
def destroy(self):
"""
Cleans up registered providers, callbacks and the refresh timer
"""
event.remove_callback(self.on_option_set, 'plugin_lastfmlove_option_set')
providers.unregister('tray-icon-context', self.tray_menu_item)
providers.unregister('playlist-context-menu', self.menu_item)
providers.unregister('playlist-columns-menu', self.column_menu_item)
providers.unregister('playlist-columns', LoveColumn)
if self.timer is not None and self.timer.is_alive():
self.timer.cancel()
def setup_network(self):
"""
Tries to set up the network, retrieve the user
and the initial list of loved tracks
"""
try:
self.network = pylast.LastFMNetwork(
api_key=settings.get_option('plugin/lastfmlove/api_key', 'K'),
api_secret=settings.get_option('plugin/lastfmlove/api_secret', 'S'),
username=settings.get_option('plugin/ascrobbler/user', ''),
password_hash=settings.get_option('plugin/ascrobbler/password', '')
)
self.user = self.network.get_user(self.network.username)
except Exception as e:
self.network = None
self.user = None
if self.timer is not None and self.timer.is_alive():
self.timer.cancel()
logger.warning('Error while connecting to Last.fm network: {0}'.format(e))
else:
thread = Thread(target=self.get_loved_tracks)
thread.daemon = True
thread.start()
logger.info('Connection to Last.fm network successful')
def restart_timer(self):
"""
Restarts the timer which starts the retrieval of tracks
"""
if self.timer is not None and self.timer.is_alive():
self.timer.cancel()
self.timer = Timer(
settings.get_option('plugin/lastfmlove/refresh_interval', 3600),
self.get_loved_tracks
)
self.timer.daemon = True
self.timer.start()
def get_loved_tracks(self):
"""
Updates the list of loved tracks
"""
logger.debug('Retrieving list of loved tracks...')
try:
tracks = self.user.get_loved_tracks(limit=None)
# Unwrap pylast.Track from pylast.LovedTrack
self.loved_tracks = [l.track for l in tracks]
except Exception as e:
logger.warning('Failed to retrieve list of loved tracks: {0}'.format(e))
self.restart_timer()
def toggle_loved(self, track):
"""
Toggles the loved state of a track
:param track: the track to love/unlove
:type track: `xl.trax.Track`
"""
lastfm_track = pylast.Track(
track.get_tag_display('artist'),
track.get_tag_display('title'),
self.network
)
if lastfm_track in self.loved_tracks:
self.unlove_track(lastfm_track)
else:
self.love_track(lastfm_track)
@common.threaded
def love_track(self, track):
"""
Loves a track
:param track: the track to love
:type track: `pylast.Track`
"""
try:
track.love()
except Exception as e:
logger.warning('Error while loving track {0}: {1}'.format(track, e))
else:
self.loved_tracks.append(track)
logger.info('Loved track {0}'.format(track))
@common.threaded
def unlove_track(self, track):
"""
Unloves a track
:param track: the track to unlove
:type track: `pylast.Track`
"""
try:
track.unlove()
except Exception as e:
logger.warning('Error while unloving track {0}: {1}'.format(track, e))
else:
self.loved_tracks.remove(track)
logger.info('Unloved track {0}'.format(track))
def on_option_set(self, event, settings, option):
"""
Takes action upon setting changes
"""
if option in ('plugin/lastfmlove/api_key', 'plugin/lastfmlove/api_secret'):
self.setup_network()
elif option == 'plugin/lastfmlove/refresh_interval':
self.restart_timer()
| gpl-2.0 |
40223149/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/zipfile.py | 620 | 66368 | """
Read and write ZIP files.
XXX references to utf-8 need further investigation.
"""
import io
import os
import re
import imp
import sys
import time
import stat
import shutil
import struct
import binascii
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
try:
import bz2 # We may need its compression method
except ImportError:
bz2 = None
try:
import lzma # We may need its compression method
except ImportError:
lzma = None
__all__ = ["BadZipFile", "BadZipfile", "error",
"ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
"is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
class BadZipFile(Exception):
pass
class LargeZipFile(Exception):
"""
Raised when writing a zipfile that would require ZIP64 extensions
while those extensions are disabled.
"""
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
try:
if _EndRecData(fp):
return True # file has correct magic number
except IOError:
pass
return False
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except IOError:
pass
return result
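# Example (sketch): is_zipfile() accepts either a path or a seekable binary
# file object, e.g.
#
#     is_zipfile('archive.zip')             # -> True or False
#     with open('archive.zip', 'rb') as f:
#         is_zipfile(f)                     # also works on file-like objects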
def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
if len(data) != sizeEndCentDir64Locator:
return endrec
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipFile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
if len(data) != sizeEndCentDir64:
return endrec
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec
def _EndRecData(fpin):
"""Return data from the "End of Central Directory" record, or None.
The data is a list of the nine items in the ZIP "End of central dir"
record followed by a tenth item, the file seek offset of this record."""
# Determine file size
fpin.seek(0, 2)
filesize = fpin.tell()
# Check to see if this is ZIP file with no archive comment (the
# "end of central directory" structure should be the last item in the
# file if this is the case).
try:
fpin.seek(-sizeEndCentDir, 2)
except IOError:
return None
data = fpin.read()
if (len(data) == sizeEndCentDir and
data[0:4] == stringEndArchive and
data[-2:] == b"\000\000"):
# the signature is correct and there's no comment, unpack structure
endrec = struct.unpack(structEndArchive, data)
endrec=list(endrec)
# Append a blank comment and record start offset
endrec.append(b"")
endrec.append(filesize - sizeEndCentDir)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, -sizeEndCentDir, endrec)
# Either this is not a ZIP file, or it is a ZIP file with an archive
# comment. Search the end of the file for the "end of central directory"
# record signature. The comment is the last item in the ZIP file and may be
# up to 64K long. It is assumed that the "end of central directory" magic
# number does not appear in the comment.
maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
fpin.seek(maxCommentStart, 0)
data = fpin.read()
start = data.rfind(stringEndArchive)
if start >= 0:
# found the magic number; attempt to unpack and interpret
recData = data[start:start+sizeEndCentDir]
if len(recData) != sizeEndCentDir:
# Zip file is corrupted.
return None
endrec = list(struct.unpack(structEndArchive, recData))
commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
endrec.append(comment)
endrec.append(maxCommentStart + start)
# Try to read the "Zip64 end of central directory" structure
return _EndRecData64(fpin, maxCommentStart + start - filesize,
endrec)
# Unable to find a valid end of central directory structure
return None
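# Sketch of typical use of _EndRecData() (illustration only): the returned
# list is indexed with the _ECD_* constants defined above, e.g.
#
#     endrec = _EndRecData(fp)
#     if endrec:
#         size_cd = endrec[_ECD_SIZE]       # bytes in the central directory
#         offset_cd = endrec[_ECD_OFFSET]   # offset of the central directory
#         comment = endrec[_ECD_COMMENT]    # archive comment bytes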
class ZipInfo (object):
"""Class with attributes describing each file in the ZIP archive."""
__slots__ = (
'orig_filename',
'filename',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'CRC',
'compress_size',
'file_size',
'_raw_time',
)
def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
self.orig_filename = filename # Original file name in archive
# Terminate the file name at the first null byte. Null bytes in file
# names are used as tricks by viruses in archives.
null_byte = filename.find(chr(0))
if null_byte >= 0:
filename = filename[0:null_byte]
# This is used to ensure paths in generated ZIP files always use
# forward slashes as the directory separator, as required by the
# ZIP format specification.
if os.sep != "/" and os.sep in filename:
filename = filename.replace(os.sep, "/")
self.filename = filename # Normalized file name
self.date_time = date_time # year, month, day, hour, min, sec
if date_time[0] < 1980:
raise ValueError('ZIP does not support timestamps before 1980')
# Standard values:
self.compress_type = ZIP_STORED # Type of compression for the file
self.comment = b"" # Comment for each file
self.extra = b"" # ZIP extra data
if sys.platform == 'win32':
self.create_system = 0 # System which created ZIP archive
else:
# Assume everything else is unix-y
self.create_system = 3 # System which created ZIP archive
self.create_version = DEFAULT_VERSION # Version which created ZIP archive
self.extract_version = DEFAULT_VERSION # Version needed to extract archive
self.reserved = 0 # Must be zero
self.flag_bits = 0 # ZIP flag bits
self.volume = 0 # Volume number of file header
self.internal_attr = 0 # Internal attributes
self.external_attr = 0 # External file attributes
# Other attributes are set by class ZipFile:
# header_offset Byte offset to the file header
# CRC CRC-32 of the uncompressed file
# compress_size Size of the compressed file
# file_size Size of the uncompressed file
def FileHeader(self, zip64=None):
"""Return the per-file header as a string."""
dt = self.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
if self.flag_bits & 0x08:
# Set these to zero because we write them after the file data
CRC = compress_size = file_size = 0
else:
CRC = self.CRC
compress_size = self.compress_size
file_size = self.file_size
extra = self.extra
min_version = 0
if zip64 is None:
zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
if zip64:
fmt = '<HHQQ'
extra = extra + struct.pack(fmt,
1, struct.calcsize(fmt)-4, file_size, compress_size)
if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
if not zip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
# File is larger than what fits into a 4 byte integer,
# fall back to the ZIP64 extension
file_size = 0xffffffff
compress_size = 0xffffffff
min_version = ZIP64_VERSION
if self.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif self.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
self.extract_version = max(min_version, self.extract_version)
self.create_version = max(min_version, self.create_version)
filename, flag_bits = self._encodeFilenameFlags()
header = struct.pack(structFileHeader, stringFileHeader,
self.extract_version, self.reserved, flag_bits,
self.compress_type, dostime, dosdate, CRC,
compress_size, file_size,
len(filename), len(extra))
return header + filename + extra
def _encodeFilenameFlags(self):
try:
return self.filename.encode('ascii'), self.flag_bits
except UnicodeEncodeError:
return self.filename.encode('utf-8'), self.flag_bits | 0x800
def _decodeExtra(self):
# Try to decode the extra field.
extra = self.extra
unpack = struct.unpack
while extra:
tp, ln = unpack('<HH', extra[:4])
if tp == 1:
if ln >= 24:
counts = unpack('<QQQ', extra[4:28])
elif ln == 16:
counts = unpack('<QQ', extra[4:20])
elif ln == 8:
counts = unpack('<Q', extra[4:12])
elif ln == 0:
counts = ()
else:
raise RuntimeError("Corrupt extra field %s"%(ln,))
idx = 0
# ZIP64 extension (large files and/or large archives)
if self.file_size in (0xffffffffffffffff, 0xffffffff):
self.file_size = counts[idx]
idx += 1
if self.compress_size == 0xFFFFFFFF:
self.compress_size = counts[idx]
idx += 1
if self.header_offset == 0xffffffff:
old = self.header_offset
self.header_offset = counts[idx]
idx+=1
extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32((self.key1 >> 24) & 255, self.key2)
def __call__(self, c):
"""Decrypt a single character."""
assert isinstance(c, int)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
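# Note (illustration): in Python 3 the decrypter operates on integer byte
# values, so a whole bytes object is typically decrypted with
#
#     zd = _ZipDecrypter(b'secret')
#     plain = bytes(map(zd, cipher_bytes))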
class LZMACompressor:
def __init__(self):
self._comp = None
def _init(self):
props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
])
return struct.pack('<BBH', 9, 4, len(props)) + props
def compress(self, data):
if self._comp is None:
return self._init() + self._comp.compress(data)
return self._comp.compress(data)
def flush(self):
if self._comp is None:
return self._init() + self._comp.flush()
return self._comp.flush()
class LZMADecompressor:
def __init__(self):
self._decomp = None
self._unconsumed = b''
self.eof = False
def decompress(self, data):
if self._decomp is None:
self._unconsumed += data
if len(self._unconsumed) <= 4:
return b''
psize, = struct.unpack('<H', self._unconsumed[2:4])
if len(self._unconsumed) <= 4 + psize:
return b''
self._decomp = lzma.LZMADecompressor(lzma.FORMAT_RAW, filters=[
lzma._decode_filter_properties(lzma.FILTER_LZMA1,
self._unconsumed[4:4 + psize])
])
data = self._unconsumed[4 + psize:]
del self._unconsumed
result = self._decomp.decompress(data)
self.eof = self._decomp.eof
return result
compressor_names = {
0: 'store',
1: 'shrink',
2: 'reduce',
3: 'reduce',
4: 'reduce',
5: 'reduce',
6: 'implode',
7: 'tokenize',
8: 'deflate',
9: 'deflate64',
10: 'implode',
12: 'bzip2',
14: 'lzma',
18: 'terse',
19: 'lz77',
97: 'wavpack',
98: 'ppmd',
}
def _check_compression(compression):
if compression == ZIP_STORED:
pass
elif compression == ZIP_DEFLATED:
if not zlib:
raise RuntimeError(
"Compression requires the (missing) zlib module")
elif compression == ZIP_BZIP2:
if not bz2:
raise RuntimeError(
"Compression requires the (missing) bz2 module")
elif compression == ZIP_LZMA:
if not lzma:
raise RuntimeError(
"Compression requires the (missing) lzma module")
else:
raise RuntimeError("That compression method is not supported")
def _get_compressor(compress_type):
if compress_type == ZIP_DEFLATED:
return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Compressor()
elif compress_type == ZIP_LZMA:
return LZMACompressor()
else:
return None
def _get_decompressor(compress_type):
if compress_type == ZIP_STORED:
return None
elif compress_type == ZIP_DEFLATED:
return zlib.decompressobj(-15)
elif compress_type == ZIP_BZIP2:
return bz2.BZ2Decompressor()
elif compress_type == ZIP_LZMA:
return LZMADecompressor()
else:
descr = compressor_names.get(compress_type)
if descr:
raise NotImplementedError("compression type %d (%s)" % (compress_type, descr))
else:
raise NotImplementedError("compression type %d" % (compress_type,))
class ZipExtFile(io.BufferedIOBase):
"""File-like object for reading an archive member.
Is returned by ZipFile.open().
"""
# Max size supported by decompressor.
MAX_N = 1 << 31 - 1
# Read from compressed files in 4k blocks.
MIN_READ_SIZE = 4096
# Search for universal newlines or line chunks.
PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
def __init__(self, fileobj, mode, zipinfo, decrypter=None,
close_fileobj=False):
self._fileobj = fileobj
self._decrypter = decrypter
self._close_fileobj = close_fileobj
self._compress_type = zipinfo.compress_type
self._compress_left = zipinfo.compress_size
self._left = zipinfo.file_size
self._decompressor = _get_decompressor(self._compress_type)
self._eof = False
self._readbuffer = b''
self._offset = 0
self._universal = 'U' in mode
self.newlines = None
# Adjust read size for encrypted files since the first 12 bytes
# are for the encryption/password information.
if self._decrypter is not None:
self._compress_left -= 12
self.mode = mode
self.name = zipinfo.filename
if hasattr(zipinfo, 'CRC'):
self._expected_crc = zipinfo.CRC
self._running_crc = crc32(b'') & 0xffffffff
else:
self._expected_crc = None
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
def peek(self, n=1):
"""Returns buffered bytes without advancing the position."""
if n > len(self._readbuffer) - self._offset:
chunk = self.read(n)
if len(chunk) > self._offset:
self._readbuffer = chunk + self._readbuffer[self._offset:]
self._offset = 0
else:
self._offset -= len(chunk)
# Return up to 512 bytes to reduce allocation overhead for tight loops.
return self._readbuffer[self._offset: self._offset + 512]
def readable(self):
return True
def read(self, n=-1):
"""Read and return up to n bytes.
If the argument is omitted, None, or negative, data is read and returned until EOF is reached.
"""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
buf += self._read1(self.MAX_N)
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while n > 0 and not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
buf += data
n -= len(data)
return buf
def _update_crc(self, newdata):
# Update the CRC using the given data.
if self._expected_crc is None:
# No need to compute the CRC if we don't have a reference value
return
self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
# Check the CRC if we're at the end of the file
if self._eof and self._running_crc != self._expected_crc:
raise BadZipFile("Bad CRC-32 for file %r" % self.name)
def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
if n is None or n < 0:
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
while not self._eof:
data = self._read1(self.MAX_N)
if data:
buf += data
break
return buf
end = n + self._offset
if end < len(self._readbuffer):
buf = self._readbuffer[self._offset:end]
self._offset = end
return buf
n = end - len(self._readbuffer)
buf = self._readbuffer[self._offset:]
self._readbuffer = b''
self._offset = 0
if n > 0:
while not self._eof:
data = self._read1(n)
if n < len(data):
self._readbuffer = data
self._offset = n
buf += data[:n]
break
if data:
buf += data
break
return buf
def _read1(self, n):
# Read up to n compressed bytes with at most one read() system call,
# decrypt and decompress them.
if self._eof or n <= 0:
return b''
# Read from file.
if self._compress_type == ZIP_DEFLATED:
## Handle unconsumed data.
data = self._decompressor.unconsumed_tail
if n > len(data):
data += self._read2(n - len(data))
else:
data = self._read2(n)
if self._compress_type == ZIP_STORED:
self._eof = self._compress_left <= 0
elif self._compress_type == ZIP_DEFLATED:
n = max(n, self.MIN_READ_SIZE)
data = self._decompressor.decompress(data, n)
self._eof = (self._decompressor.eof or
self._compress_left <= 0 and
not self._decompressor.unconsumed_tail)
if self._eof:
data += self._decompressor.flush()
else:
data = self._decompressor.decompress(data)
self._eof = self._decompressor.eof or self._compress_left <= 0
data = data[:self._left]
self._left -= len(data)
if self._left <= 0:
self._eof = True
self._update_crc(data)
return data
def _read2(self, n):
if self._compress_left <= 0:
return b''
n = max(n, self.MIN_READ_SIZE)
n = min(n, self._compress_left)
data = self._fileobj.read(n)
self._compress_left -= len(data)
if self._decrypter is not None:
data = bytes(map(self._decrypter, data))
return data
def close(self):
try:
if self._close_fileobj:
self._fileobj.close()
finally:
super().close()
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
_windows_illegal_name_trans_table = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
"""Open the ZIP file with mode read "r", write "w" or append "a"."""
if mode not in ("r", "w", "a"):
raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.mode = key = mode.replace('b', '')[0]
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
try:
self.fp = io.open(file, modeDict[mode])
except IOError:
if mode == 'a':
mode = key = 'w'
self.fp = io.open(file, modeDict[mode])
else:
raise
else:
self._filePassed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
try:
if key == 'r':
self._RealGetContents()
elif key == 'w':
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
elif key == 'a':
try:
# See if file is a zip file
self._RealGetContents()
# seek to start of directory and overwrite
self.fp.seek(self.start_dir, 0)
except BadZipFile:
# file is not a zip file, just append
self.fp.seek(0, 2)
# set the modified flag so central directory gets written
# even if no files are added to the archive
self._didModify = True
else:
raise RuntimeError('Mode must be "r", "w" or "a"')
except:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
raise
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def _RealGetContents(self):
"""Read in the table of contents for the ZIP file."""
fp = self.fp
try:
endrec = _EndRecData(fp)
except IOError:
raise BadZipFile("File is not a zip file")
if not endrec:
raise BadZipFile("File is not a zip file")
if self.debug > 1:
print(endrec)
size_cd = endrec[_ECD_SIZE] # bytes in central directory
offset_cd = endrec[_ECD_OFFSET] # offset of central directory
self._comment = endrec[_ECD_COMMENT] # archive comment
# "concat" is zero, unless zip was concatenated to another file
concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
if endrec[_ECD_SIGNATURE] == stringEndArchive64:
# If Zip64 extension structures are present, account for them
concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
if self.debug > 2:
inferred = concat + offset_cd
print("given, inferred, offset", offset_cd, inferred, concat)
# self.start_dir: Position of start of central directory
self.start_dir = offset_cd + concat
fp.seek(self.start_dir, 0)
data = fp.read(size_cd)
fp = io.BytesIO(data)
total = 0
while total < size_cd:
centdir = fp.read(sizeCentralDir)
if len(centdir) != sizeCentralDir:
raise BadZipFile("Truncated central directory")
centdir = struct.unpack(structCentralDir, centdir)
if centdir[_CD_SIGNATURE] != stringCentralDir:
raise BadZipFile("Bad magic number for central directory")
if self.debug > 2:
print(centdir)
filename = fp.read(centdir[_CD_FILENAME_LENGTH])
flags = centdir[5]
if flags & 0x800:
# UTF-8 file names extension
filename = filename.decode('utf-8')
else:
# Historical ZIP filename encoding
filename = filename.decode('cp437')
# Create ZipInfo instance to store file information
x = ZipInfo(filename)
x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
(x.create_version, x.create_system, x.extract_version, x.reserved,
x.flag_bits, x.compress_type, t, d,
x.CRC, x.compress_size, x.file_size) = centdir[1:12]
if x.extract_version > MAX_EXTRACT_VERSION:
raise NotImplementedError("zip file version %.1f" %
(x.extract_version / 10))
x.volume, x.internal_attr, x.external_attr = centdir[15:18]
# Convert date/time code to (year, month, day, hour, min, sec)
x._raw_time = t
x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
x._decodeExtra()
x.header_offset = x.header_offset + concat
self.filelist.append(x)
self.NameToInfo[x.filename] = x
# update total bytes read from central directory
total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
+ centdir[_CD_EXTRA_FIELD_LENGTH]
+ centdir[_CD_COMMENT_LENGTH])
if self.debug > 2:
print("total", total)
def namelist(self):
"""Return a list of file names in the archive."""
return [data.filename for data in self.filelist]
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self, file=None):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
file=file)
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
file=file)
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
with self.open(zinfo.filename, "r") as f:
while f.read(chunk_size): # Check CRC-32
pass
except BadZipFile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
@property
def comment(self):
"""The comment text associated with the ZIP file."""
return self._comment
@comment.setter
def comment(self, comment):
if not isinstance(comment, bytes):
raise TypeError("comment: expected bytes, got %s" % type(comment))
# check for valid comment length
if len(comment) >= ZIP_MAX_COMMENT:
if self.debug:
print('Archive comment is too long; truncating to %d bytes'
% ZIP_MAX_COMMENT)
comment = comment[:ZIP_MAX_COMMENT]
self._comment = comment
self._didModify = True
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
with self.open(name, "r", pwd) as fp:
return fp.read()
def open(self, name, mode="r", pwd=None):
"""Return file-like object for 'name'."""
if mode not in ("r", "U", "rU"):
raise RuntimeError('open() requires mode "r", "U", or "rU"')
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if not self.fp:
raise RuntimeError(
"Attempt to read ZIP archive that was already closed")
# Only open a new file for instances where we were not
# given a file object in the constructor
if self._filePassed:
zef_file = self.fp
else:
zef_file = io.open(self.filename, 'rb')
try:
# Make sure we have an info object
if isinstance(name, ZipInfo):
# 'name' is already an info object
zinfo = name
else:
# Get info object for name
zinfo = self.getinfo(name)
zef_file.seek(zinfo.header_offset, 0)
# Skip the file header:
fheader = zef_file.read(sizeFileHeader)
if len(fheader) != sizeFileHeader:
raise BadZipFile("Truncated file header")
fheader = struct.unpack(structFileHeader, fheader)
if fheader[_FH_SIGNATURE] != stringFileHeader:
raise BadZipFile("Bad magic number for file header")
fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
if fheader[_FH_EXTRA_FIELD_LENGTH]:
zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
if zinfo.flag_bits & 0x20:
# Zip 2.7: compressed patched data
raise NotImplementedError("compressed patched data (flag bit 5)")
if zinfo.flag_bits & 0x40:
# strong encryption
raise NotImplementedError("strong encryption (flag bit 6)")
if zinfo.flag_bits & 0x800:
# UTF-8 filename
fname_str = fname.decode("utf-8")
else:
fname_str = fname.decode("cp437")
if fname_str != zinfo.orig_filename:
raise BadZipFile(
'File name in directory %r and header %r differ.'
% (zinfo.orig_filename, fname))
# check for encrypted flag & handle password
is_encrypted = zinfo.flag_bits & 0x1
zd = None
if is_encrypted:
if not pwd:
pwd = self.pwd
if not pwd:
raise RuntimeError("File %s is encrypted, password "
"required for extraction" % name)
zd = _ZipDecrypter(pwd)
# The first 12 bytes in the cypher stream is an encryption header
# used to strengthen the algorithm. The first 11 bytes are
# completely random, while the 12th contains the MSB of the CRC,
# or the MSB of the file time depending on the header type
# and is used to check the correctness of the password.
header = zef_file.read(12)
h = list(map(zd, header[0:12]))
if zinfo.flag_bits & 0x8:
# compare against the file type from extended local headers
check_byte = (zinfo._raw_time >> 8) & 0xff
else:
# compare against the CRC otherwise
check_byte = (zinfo.CRC >> 24) & 0xff
if h[11] != check_byte:
raise RuntimeError("Bad password for file", name)
return ZipExtFile(zef_file, mode, zinfo, zd,
close_fileobj=not self._filePassed)
except:
if not self._filePassed:
zef_file.close()
raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
@classmethod
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
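# Worked example of the sanitization above (illustration):
#
#     ZipFile._sanitize_windows_name(r'dir.\bad:na*me?.txt', '\\')
#     # -> 'dir\\bad_na_me_.txt'
#     # illegal characters become '_', trailing dots are stripped per part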
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
arcname = member.filename.replace('/', os.path.sep)
if os.path.altsep:
arcname = arcname.replace(os.path.altsep, os.path.sep)
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
arcname = os.path.splitdrive(arcname)[1]
invalid_path_parts = ('', os.path.curdir, os.path.pardir)
arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
if x not in invalid_path_parts)
if os.path.sep == '\\':
# filter illegal characters on Windows
arcname = self._sanitize_windows_name(arcname, os.path.sep)
targetpath = os.path.join(targetpath, arcname)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
with self.open(member, pwd=pwd) as source, \
open(targetpath, "wb") as target:
shutil.copyfileobj(source, target)
return targetpath
def _writecheck(self, zinfo):
"""Check for errors before writing a file to the archive."""
if zinfo.filename in self.NameToInfo:
if self.debug: # Warning for duplicate names
print("Duplicate name:", zinfo.filename)
if self.mode not in ("w", "a"):
raise RuntimeError('write() requires mode "w" or "a"')
if not self.fp:
raise RuntimeError(
"Attempt to write ZIP archive that was already closed")
_check_compression(zinfo.compress_type)
if zinfo.file_size > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
if zinfo.header_offset > ZIP64_LIMIT:
if not self._allowZip64:
raise LargeZipFile(
"Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if isdir:
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader(False))
return
cmpr = _get_compressor(zinfo.compress_type)
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
# Compressed size can be larger than uncompressed size
zip64 = self._allowZip64 and \
zinfo.file_size * 1.05 > ZIP64_LIMIT
self.fp.write(zinfo.FileHeader(zip64))
file_size = 0
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
if not zip64 and self._allowZip64:
if file_size > ZIP64_LIMIT:
raise RuntimeError('File size has increased during compressing')
if compress_size > ZIP64_LIMIT:
raise RuntimeError('Compressed size larger than uncompressed size')
# Seek backwards and write file header (which will now include
# correct CRC and file sizes)
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset, 0)
self.fp.write(zinfo.FileHeader(zip64))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, data, compress_type=None):
"""Write a file into the archive. The contents is 'data', which
may be either a 'str' or a 'bytes' instance; if it is a 'str',
it is encoded as UTF-8 first.
'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if isinstance(data, str):
data = data.encode("utf-8")
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0o600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.file_size = len(data) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header data
if compress_type is not None:
zinfo.compress_type = compress_type
if zinfo.compress_type == ZIP_LZMA:
# Compressed data includes an end-of-stream (EOS) marker
zinfo.flag_bits |= 0x02
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(data) & 0xffffffff # CRC-32 checksum
co = _get_compressor(zinfo.compress_type)
if co:
data = co.compress(data) + co.flush()
zinfo.compress_size = len(data) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zip64 = zinfo.file_size > ZIP64_LIMIT or \
zinfo.compress_size > ZIP64_LIMIT
if zip64 and not self._allowZip64:
raise LargeZipFile("Filesize would require ZIP64 extensions")
self.fp.write(zinfo.FileHeader(zip64))
self.fp.write(data)
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
fmt = '<LQQ' if zip64 else '<LLL'
self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.flush()
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
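# Sketch of writestr() in use (illustration): str data is encoded as UTF-8,
# bytes are written as-is, and a ZipInfo can be supplied to control metadata:
#
#     zf.writestr('hello.txt', 'hello world')
#     info = ZipInfo('logs/app.log', date_time=(2013, 1, 1, 0, 0, 0))
#     zf.writestr(info, b'raw bytes', compress_type=ZIP_STORED)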
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
min_version = 0
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
min_version = ZIP64_VERSION
if zinfo.compress_type == ZIP_BZIP2:
min_version = max(BZIP2_VERSION, min_version)
elif zinfo.compress_type == ZIP_LZMA:
min_version = max(LZMA_VERSION, min_version)
extract_version = max(min_version, zinfo.extract_version)
create_version = max(min_version, zinfo.create_version)
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self._comment))
self.fp.write(endrec)
self.fp.write(self._comment)
self.fp.flush()
finally:
fp = self.fp
self.fp = None
if not self._filePassed:
fp.close()
class PyZipFile(ZipFile):
"""Class to create ZIP archives with Python library files and packages."""
def __init__(self, file, mode="r", compression=ZIP_STORED,
allowZip64=False, optimize=-1):
ZipFile.__init__(self, file, mode=mode, compression=compression,
allowZip64=allowZip64)
self._optimize = optimize
def writepy(self, pathname, basename=""):
"""Add all files from "pathname" to the ZIP archive.
If pathname is a package directory, search the directory and
all package subdirectories recursively for all *.py and enter
the modules into the archive. If pathname is a plain
directory, listdir *.py and enter all modules. Else, pathname
must be a Python *.py file and the module will be put into the
archive. Added modules are always module.pyo or module.pyc.
This method will compile the module.py into module.pyc if
necessary.
"""
dir, name = os.path.split(pathname)
if os.path.isdir(pathname):
initname = os.path.join(pathname, "__init__.py")
if os.path.isfile(initname):
# This is a package directory, add it
if basename:
basename = "%s/%s" % (basename, name)
else:
basename = name
if self.debug:
print("Adding package in", pathname, "as", basename)
fname, arcname = self._get_codename(initname[0:-3], basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
dirlist = os.listdir(pathname)
dirlist.remove("__init__.py")
# Add all *.py files and package subdirectories
for filename in dirlist:
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if os.path.isdir(path):
if os.path.isfile(os.path.join(path, "__init__.py")):
# This is a package directory, add it
self.writepy(path, basename) # Recursive call
elif ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
# This is NOT a package directory, add its files at top level
if self.debug:
print("Adding files from directory", pathname)
for filename in os.listdir(pathname):
path = os.path.join(pathname, filename)
root, ext = os.path.splitext(filename)
if ext == ".py":
fname, arcname = self._get_codename(path[0:-3],
basename)
if self.debug:
print("Adding", arcname)
self.write(fname, arcname)
else:
if pathname[-3:] != ".py":
raise RuntimeError(
'Files added with writepy() must end with ".py"')
fname, arcname = self._get_codename(pathname[0:-3], basename)
if self.debug:
print("Adding file", arcname)
self.write(fname, arcname)
def _get_codename(self, pathname, basename):
"""Return (filename, archivename) for the path.
Given a module name path, return the correct file path and
archive name, compiling if necessary. For example, given
/python/lib/string, return (/python/lib/string.pyc, string).
"""
def _compile(file, optimize=-1):
import py_compile
if self.debug:
print("Compiling", file)
try:
py_compile.compile(file, doraise=True, optimize=optimize)
except py_compile.PyCompileError as err:
print(err.msg)
return False
return True
file_py = pathname + ".py"
file_pyc = pathname + ".pyc"
file_pyo = pathname + ".pyo"
pycache_pyc = imp.cache_from_source(file_py, True)
pycache_pyo = imp.cache_from_source(file_py, False)
if self._optimize == -1:
# legacy mode: use whatever file is present
if (os.path.isfile(file_pyo) and
os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyo file.
arcname = fname = file_pyo
elif (os.path.isfile(file_pyc) and
os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use .pyc file.
arcname = fname = file_pyc
elif (os.path.isfile(pycache_pyc) and
os.stat(pycache_pyc).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyc file, but write it to the legacy pyc
# file name in the archive.
fname = pycache_pyc
arcname = file_pyc
elif (os.path.isfile(pycache_pyo) and
os.stat(pycache_pyo).st_mtime >= os.stat(file_py).st_mtime):
# Use the __pycache__/*.pyo file, but write it to the legacy pyo
# file name in the archive.
fname = pycache_pyo
arcname = file_pyo
else:
# Compile py into PEP 3147 pyc file.
if _compile(file_py):
fname = (pycache_pyc if __debug__ else pycache_pyo)
arcname = (file_pyc if __debug__ else file_pyo)
else:
fname = arcname = file_py
else:
# new mode: use given optimization level
if self._optimize == 0:
fname = pycache_pyc
arcname = file_pyc
else:
fname = pycache_pyo
arcname = file_pyo
if not (os.path.isfile(fname) and
os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
if not _compile(file_py, optimize=self._optimize):
fname = arcname = file_py
archivename = os.path.split(arcname)[1]
if basename:
archivename = "%s/%s" % (basename, archivename)
return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print(USAGE)
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
zf.printdir()
elif args[0] == '-t':
if len(args) != 2:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print("Done testing")
elif args[0] == '-e':
if len(args) != 3:
print(USAGE)
sys.exit(1)
with ZipFile(args[1], 'r') as zf:
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
elif args[0] == '-c':
if len(args) < 3:
print(USAGE)
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
with ZipFile(args[1], 'w', allowZip64=True) as zf:
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
if __name__ == "__main__":
main()
| gpl-3.0 |
double12gzh/nova | nova/api/openstack/compute/contrib/os_tenant_networks.py | 8 | 8472 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
import nova.network
from nova import quota
CONF = cfg.CONF
os_network_opts = [
cfg.BoolOpt("enable_network_quota",
default=False,
help='Enables or disables quota checking for tenant '
'networks'),
cfg.StrOpt('use_neutron_default_nets',
default="False",
               help='Controls whether to check for default networks'),
cfg.StrOpt('neutron_default_tenant_id',
default="default",
help='Default tenant id when creating neutron '
'networks'),
cfg.IntOpt('quota_networks',
default=3,
help='Number of private networks allowed per project'),
]
CONF.register_opts(os_network_opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
# NOTE(danms): Here, network should be an object, which could have come
# from neutron and thus be missing most of the attributes. Providing a
# default to get() avoids trying to lazy-load missing attributes.
return {"id": network.get("uuid", None) or network.get("id", None),
"cidr": str(network.get("cidr", None)),
"label": network.get("label", None)}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in six.iteritems(networks)]
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
reservation = None
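        # Reserve a quota decrement (networks=-1) up front; the reservation is
        # only committed after the network has actually been deleted below.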
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_LE("Failed to update usages deallocating "
"network."))
def _rollback_quota(reservation):
if CONF.enable_network_quota and reservation:
QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
except exception.PolicyNotAuthorized as e:
_rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=six.text_type(e))
except exception.NetworkInUse as e:
_rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
_rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
response = webob.Response(status_int=202)
return response
def create(self, req, body):
if not body:
raise exc.HTTPUnprocessableEntity()
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = {k: network.get(k) for k in keys}
if not network.get("label"):
msg = _("Network label is required")
raise exc.HTTPBadRequest(explanation=msg)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
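                # A network smaller than 4 addresses cannot provide 2 usable
                # hosts once the network and broadcast addresses are excluded.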
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except exception.PolicyNotAuthorized as e:
raise exc.HTTPForbidden(explanation=six.text_type(e))
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class Os_tenant_networks(extensions.ExtensionDescriptor):
"""Tenant-based Network Management Extension."""
name = "OSTenantNetworks"
alias = "os-tenant-networks"
namespace = ("http://docs.openstack.org/compute/"
"ext/os-tenant-networks/api/v2")
updated = "2012-03-07T14:46:43Z"
def get_resources(self):
ext = extensions.ResourceExtension('os-tenant-networks',
NetworkController())
return [ext]
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.2/Lib/importlib/test/source/test_abc_loader.py | 51 | 31363 | import importlib
from importlib import abc
from .. import abc as testing_abc
from .. import util
from . import util as source_util
import imp
import inspect
import io
import marshal
import os
import sys
import types
import unittest
import warnings
class SourceOnlyLoaderMock(abc.SourceLoader):
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
b"repr(__loader__)])")
def __init__(self, path):
self.path = path
def get_data(self, path):
assert self.path == path
return self.source
def get_filename(self, fullname):
return self.path
class SourceLoaderMock(SourceOnlyLoaderMock):
source_mtime = 1
def __init__(self, path, magic=imp.get_magic()):
super().__init__(path)
self.bytecode_path = imp.cache_from_source(self.path)
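        # Hand-build valid bytecode: magic number, 4-byte source mtime, then
        # the marshalled code object.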
data = bytearray(magic)
data.extend(marshal._w_long(self.source_mtime))
code_object = compile(self.source, self.path, 'exec',
dont_inherit=True)
data.extend(marshal.dumps(code_object))
self.bytecode = bytes(data)
self.written = {}
def get_data(self, path):
if path == self.path:
return super().get_data(path)
elif path == self.bytecode_path:
return self.bytecode
else:
raise IOError
def path_mtime(self, path):
assert path == self.path
return self.source_mtime
def set_data(self, path, data):
self.written[path] = bytes(data)
return path == self.bytecode_path
class PyLoaderMock(abc.PyLoader):
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __package__, "
b"repr(__loader__)])")
def __init__(self, data):
"""Take a dict of 'module_name: path' pairings.
Paths should have no file extension, allowing packages to be denoted by
ending in '__init__'.
"""
self.module_paths = data
self.path_to_module = {val:key for key,val in data.items()}
def get_data(self, path):
if path not in self.path_to_module:
raise IOError
return self.source
def is_package(self, name):
filename = os.path.basename(self.get_filename(name))
return os.path.splitext(filename)[0] == '__init__'
def source_path(self, name):
try:
return self.module_paths[name]
except KeyError:
raise ImportError
def get_filename(self, name):
"""Silence deprecation warning."""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
path = super().get_filename(name)
assert len(w) == 1
assert issubclass(w[0].category, PendingDeprecationWarning)
return path
class PyLoaderCompatMock(PyLoaderMock):
"""Mock that matches what is suggested to have a loader that is compatible
from Python 3.1 onwards."""
def get_filename(self, fullname):
try:
return self.module_paths[fullname]
except KeyError:
raise ImportError
def source_path(self, fullname):
try:
return self.get_filename(fullname)
except ImportError:
return None
class PyPycLoaderMock(abc.PyPycLoader, PyLoaderMock):
default_mtime = 1
def __init__(self, source, bc={}):
"""Initialize mock.
        'bc' is a dict keyed on a module's name. The value is a dict with
        possible keys of 'path', 'mtime', 'magic', and 'bc'. Except for 'path',
        each of those keys controls whether some part of the created bytecode
        deviates from the default values.
"""
super().__init__(source)
self.module_bytecode = {}
self.path_to_bytecode = {}
self.bytecode_to_path = {}
for name, data in bc.items():
self.path_to_bytecode[data['path']] = name
self.bytecode_to_path[name] = data['path']
magic = data.get('magic', imp.get_magic())
mtime = importlib._w_long(data.get('mtime', self.default_mtime))
if 'bc' in data:
bc = data['bc']
else:
bc = self.compile_bc(name)
self.module_bytecode[name] = magic + mtime + bc
def compile_bc(self, name):
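        # Fall back to '<test>' when the module is unknown or has no path.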
source_path = self.module_paths.get(name, '<test>') or '<test>'
code = compile(self.source, source_path, 'exec')
return marshal.dumps(code)
def source_mtime(self, name):
if name in self.module_paths:
return self.default_mtime
elif name in self.module_bytecode:
return None
else:
raise ImportError
def bytecode_path(self, name):
try:
return self.bytecode_to_path[name]
except KeyError:
if name in self.module_paths:
return None
else:
raise ImportError
def write_bytecode(self, name, bytecode):
self.module_bytecode[name] = bytecode
return True
def get_data(self, path):
if path in self.path_to_module:
return super().get_data(path)
elif path in self.path_to_bytecode:
name = self.path_to_bytecode[path]
return self.module_bytecode[name]
else:
raise IOError
def is_package(self, name):
try:
return super().is_package(name)
except TypeError:
return '__init__' in self.bytecode_to_path[name]
def get_code(self, name):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
code_object = super().get_code(name)
assert len(w) == 1
assert issubclass(w[0].category, PendingDeprecationWarning)
return code_object
class PyLoaderTests(testing_abc.LoaderTests):
"""Tests for importlib.abc.PyLoader."""
mocker = PyLoaderMock
def eq_attrs(self, ob, **kwargs):
for attr, val in kwargs.items():
found = getattr(ob, attr)
self.assertEqual(found, val,
"{} attribute: {} != {}".format(attr, found, val))
def test_module(self):
name = '<module>'
path = os.path.join('', 'path', 'to', 'module')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertTrue(name in sys.modules)
self.eq_attrs(module, __name__=name, __file__=path, __package__='',
__loader__=mock)
self.assertTrue(not hasattr(module, '__path__'))
return mock, name
def test_package(self):
name = '<pkg>'
path = os.path.join('path', 'to', name, '__init__')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertTrue(name in sys.modules)
self.eq_attrs(module, __name__=name, __file__=path,
__path__=[os.path.dirname(path)], __package__=name,
__loader__=mock)
return mock, name
def test_lacking_parent(self):
name = 'pkg.mod'
path = os.path.join('path', 'to', 'pkg', 'mod')
mock = self.mocker({name: path})
with util.uncache(name):
module = mock.load_module(name)
self.assertIn(name, sys.modules)
self.eq_attrs(module, __name__=name, __file__=path, __package__='pkg',
__loader__=mock)
self.assertFalse(hasattr(module, '__path__'))
return mock, name
def test_module_reuse(self):
name = 'mod'
path = os.path.join('path', 'to', 'mod')
module = imp.new_module(name)
mock = self.mocker({name: path})
with util.uncache(name):
sys.modules[name] = module
loaded_module = mock.load_module(name)
self.assertTrue(loaded_module is module)
self.assertTrue(sys.modules[name] is module)
return mock, name
def test_state_after_failure(self):
name = "mod"
module = imp.new_module(name)
module.blah = None
mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
mock.source = b"1/0"
with util.uncache(name):
sys.modules[name] = module
with self.assertRaises(ZeroDivisionError):
mock.load_module(name)
self.assertTrue(sys.modules[name] is module)
self.assertTrue(hasattr(module, 'blah'))
return mock
def test_unloadable(self):
name = "mod"
mock = self.mocker({name: os.path.join('path', 'to', 'mod')})
mock.source = b"1/0"
with util.uncache(name):
with self.assertRaises(ZeroDivisionError):
mock.load_module(name)
self.assertTrue(name not in sys.modules)
return mock
class PyLoaderCompatTests(PyLoaderTests):
"""Test that the suggested code to make a loader that is compatible from
Python 3.1 forward works."""
mocker = PyLoaderCompatMock
class PyLoaderInterfaceTests(unittest.TestCase):
"""Tests for importlib.abc.PyLoader to make sure that when source_path()
doesn't return a path everything works as expected."""
def test_no_source_path(self):
# No source path should lead to ImportError.
name = 'mod'
mock = PyLoaderMock({})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_source_path_is_None(self):
name = 'mod'
mock = PyLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_get_filename_with_source_path(self):
# get_filename() should return what source_path() returns.
name = 'mod'
path = os.path.join('path', 'to', 'source')
mock = PyLoaderMock({name: path})
with util.uncache(name):
self.assertEqual(mock.get_filename(name), path)
def test_get_filename_no_source_path(self):
# get_filename() should raise ImportError if source_path returns None.
name = 'mod'
mock = PyLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError):
mock.get_filename(name)
class PyPycLoaderTests(PyLoaderTests):
"""Tests for importlib.abc.PyPycLoader."""
mocker = PyPycLoaderMock
@source_util.writes_bytecode_files
def verify_bytecode(self, mock, name):
assert name in mock.module_paths
self.assertIn(name, mock.module_bytecode)
magic = mock.module_bytecode[name][:4]
self.assertEqual(magic, imp.get_magic())
mtime = importlib._r_long(mock.module_bytecode[name][4:8])
self.assertEqual(mtime, 1)
bc = mock.module_bytecode[name][8:]
self.assertEqual(bc, mock.compile_bc(name))
def test_module(self):
mock, name = super().test_module()
self.verify_bytecode(mock, name)
def test_package(self):
mock, name = super().test_package()
self.verify_bytecode(mock, name)
def test_lacking_parent(self):
mock, name = super().test_lacking_parent()
self.verify_bytecode(mock, name)
def test_module_reuse(self):
mock, name = super().test_module_reuse()
self.verify_bytecode(mock, name)
def test_state_after_failure(self):
super().test_state_after_failure()
def test_unloadable(self):
super().test_unloadable()
class PyPycLoaderInterfaceTests(unittest.TestCase):
"""Test for the interface of importlib.abc.PyPycLoader."""
def get_filename_check(self, src_path, bc_path, expect):
name = 'mod'
mock = PyPycLoaderMock({name: src_path}, {name: {'path': bc_path}})
with util.uncache(name):
assert mock.source_path(name) == src_path
assert mock.bytecode_path(name) == bc_path
self.assertEqual(mock.get_filename(name), expect)
def test_filename_with_source_bc(self):
        # When both source and bytecode paths are present, return the source path.
self.get_filename_check('source_path', 'bc_path', 'source_path')
def test_filename_with_source_no_bc(self):
# With source but no bc, return source path.
self.get_filename_check('source_path', None, 'source_path')
def test_filename_with_no_source_bc(self):
        # With no source but bc, return the bc path.
self.get_filename_check(None, 'bc_path', 'bc_path')
def test_filename_with_no_source_or_bc(self):
# With no source or bc, raise ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: None}, {name: {'path': None}})
with util.uncache(name), self.assertRaises(ImportError):
mock.get_filename(name)
class SkipWritingBytecodeTests(unittest.TestCase):
"""Test that bytecode is properly handled based on
sys.dont_write_bytecode."""
@source_util.writes_bytecode_files
def run_test(self, dont_write_bytecode):
name = 'mod'
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
sys.dont_write_bytecode = dont_write_bytecode
with util.uncache(name):
mock.load_module(name)
self.assertTrue((name in mock.module_bytecode) is not
dont_write_bytecode)
def test_no_bytecode_written(self):
self.run_test(True)
def test_bytecode_written(self):
self.run_test(False)
class RegeneratedBytecodeTests(unittest.TestCase):
"""Test that bytecode is regenerated as expected."""
@source_util.writes_bytecode_files
def test_different_magic(self):
# A different magic number should lead to new bytecode.
name = 'mod'
bad_magic = b'\x00\x00\x00\x00'
assert bad_magic != imp.get_magic()
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
{name: {'path': os.path.join('path', 'to',
'mod.bytecode'),
'magic': bad_magic}})
with util.uncache(name):
mock.load_module(name)
self.assertTrue(name in mock.module_bytecode)
magic = mock.module_bytecode[name][:4]
self.assertEqual(magic, imp.get_magic())
@source_util.writes_bytecode_files
def test_old_mtime(self):
# Bytecode with an older mtime should be regenerated.
name = 'mod'
old_mtime = PyPycLoaderMock.default_mtime - 1
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')},
{name: {'path': 'path/to/mod.bytecode', 'mtime': old_mtime}})
with util.uncache(name):
mock.load_module(name)
self.assertTrue(name in mock.module_bytecode)
mtime = importlib._r_long(mock.module_bytecode[name][4:8])
self.assertEqual(mtime, PyPycLoaderMock.default_mtime)
class BadBytecodeFailureTests(unittest.TestCase):
"""Test import failures when there is no source and parts of the bytecode
    are bad."""
def test_bad_magic(self):
# A bad magic number should lead to an ImportError.
name = 'mod'
bad_magic = b'\x00\x00\x00\x00'
bc = {name:
{'path': os.path.join('path', 'to', 'mod'),
'magic': bad_magic}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_no_bytecode(self):
# Missing code object bytecode should lead to an EOFError.
name = 'mod'
bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b''}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(EOFError):
mock.load_module(name)
def test_bad_bytecode(self):
# Malformed code object bytecode should lead to a ValueError.
name = 'mod'
bc = {name: {'path': os.path.join('path', 'to', 'mod'), 'bc': b'1234'}}
mock = PyPycLoaderMock({name: None}, bc)
with util.uncache(name), self.assertRaises(ValueError):
mock.load_module(name)
def raise_ImportError(*args, **kwargs):
raise ImportError
class MissingPathsTests(unittest.TestCase):
"""Test what happens when a source or bytecode path does not exist (either
from *_path returning None or raising ImportError)."""
def test_source_path_None(self):
# Bytecode should be used when source_path returns None, along with
# __file__ being set to the bytecode path.
name = 'mod'
bytecode_path = 'path/to/mod'
mock = PyPycLoaderMock({name: None}, {name: {'path': bytecode_path}})
with util.uncache(name):
module = mock.load_module(name)
self.assertEqual(module.__file__, bytecode_path)
# Testing for bytecode_path returning None handled by all tests where no
# bytecode initially exists.
def test_all_paths_None(self):
# If all *_path methods return None, raise ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: None})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_source_path_ImportError(self):
# An ImportError from source_path should trigger an ImportError.
name = 'mod'
mock = PyPycLoaderMock({}, {name: {'path': os.path.join('path', 'to',
'mod')}})
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
def test_bytecode_path_ImportError(self):
# An ImportError from bytecode_path should trigger an ImportError.
name = 'mod'
mock = PyPycLoaderMock({name: os.path.join('path', 'to', 'mod')})
bad_meth = types.MethodType(raise_ImportError, mock)
mock.bytecode_path = bad_meth
with util.uncache(name), self.assertRaises(ImportError):
mock.load_module(name)
class SourceLoaderTestHarness(unittest.TestCase):
def setUp(self, *, is_package=True, **kwargs):
self.package = 'pkg'
if is_package:
self.path = os.path.join(self.package, '__init__.py')
self.name = self.package
else:
module_name = 'mod'
self.path = os.path.join(self.package, '.'.join(['mod', 'py']))
self.name = '.'.join([self.package, module_name])
self.cached = imp.cache_from_source(self.path)
self.loader = self.loader_mock(self.path, **kwargs)
def verify_module(self, module):
self.assertEqual(module.__name__, self.name)
self.assertEqual(module.__file__, self.path)
self.assertEqual(module.__cached__, self.cached)
self.assertEqual(module.__package__, self.package)
self.assertEqual(module.__loader__, self.loader)
values = module._.split('::')
self.assertEqual(values[0], self.name)
self.assertEqual(values[1], self.path)
self.assertEqual(values[2], self.cached)
self.assertEqual(values[3], self.package)
self.assertEqual(values[4], repr(self.loader))
def verify_code(self, code_object):
module = imp.new_module(self.name)
module.__file__ = self.path
module.__cached__ = self.cached
module.__package__ = self.package
module.__loader__ = self.loader
module.__path__ = []
exec(code_object, module.__dict__)
self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader for source-only loading.
Reload testing is subsumed by the tests for
importlib.util.module_for_loader.
"""
loader_mock = SourceOnlyLoaderMock
def test_get_source(self):
# Verify the source code is returned as a string.
# If an IOError is raised by get_data then raise ImportError.
expected_source = self.loader.source.decode('utf-8')
self.assertEqual(self.loader.get_source(self.name), expected_source)
def raise_IOError(path):
raise IOError
self.loader.get_data = raise_IOError
with self.assertRaises(ImportError):
self.loader.get_source(self.name)
def test_is_package(self):
# Properly detect when loading a package.
self.setUp(is_package=True)
self.assertTrue(self.loader.is_package(self.name))
self.setUp(is_package=False)
self.assertFalse(self.loader.is_package(self.name))
def test_get_code(self):
# Verify the code object is created.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_load_module(self):
# Loading a module should set __name__, __loader__, __package__,
# __path__ (for packages), __file__, and __cached__.
# The module should also be put into sys.modules.
with util.uncache(self.name):
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertEqual(module.__path__, [os.path.dirname(self.path)])
self.assertTrue(self.name in sys.modules)
def test_package_settings(self):
        # __package__ needs to be set, while __path__ is only set if the module
# is a package.
# Testing the values for a package are covered by test_load_module.
self.setUp(is_package=False)
with util.uncache(self.name):
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertTrue(not hasattr(module, '__path__'))
def test_get_source_encoding(self):
# Source is considered encoded in UTF-8 by default unless otherwise
# specified by an encoding line.
source = "_ = 'ü'"
self.loader.source = source.encode('utf-8')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
source = "# coding: latin-1\n_ = ü"
self.loader.source = source.encode('latin-1')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader's use of bytecode.
Source-only testing handled by SourceOnlyLoaderTests.
"""
loader_mock = SourceLoaderMock
def verify_code(self, code_object, *, bytecode_written=False):
super().verify_code(code_object)
if bytecode_written:
self.assertIn(self.cached, self.loader.written)
data = bytearray(imp.get_magic())
data.extend(marshal._w_long(self.loader.source_mtime))
data.extend(marshal.dumps(code_object))
self.assertEqual(self.loader.written[self.cached], bytes(data))
def test_code_with_everything(self):
# When everything should work.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_no_bytecode(self):
# If no bytecode exists then move on to the source.
self.loader.bytecode_path = "<does not exist>"
# Sanity check
with self.assertRaises(IOError):
bytecode_path = imp.cache_from_source(self.path)
self.loader.get_data(bytecode_path)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_code_bad_timestamp(self):
# Bytecode is only used when the timestamp matches the source EXACTLY.
for source_mtime in (0, 2):
assert source_mtime != self.loader.source_mtime
original = self.loader.source_mtime
self.loader.source_mtime = source_mtime
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
self.loader.source_mtime = original
def test_code_bad_magic(self):
# Skip over bytecode with a bad magic number.
self.setUp(magic=b'0000')
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_dont_write_bytecode(self):
# Bytecode is not written if sys.dont_write_bytecode is true.
# Can assume it is false already thanks to the skipIf class decorator.
try:
sys.dont_write_bytecode = True
self.loader.bytecode_path = "<does not exist>"
code_object = self.loader.get_code(self.name)
self.assertNotIn(self.cached, self.loader.written)
finally:
sys.dont_write_bytecode = False
def test_no_set_data(self):
# If set_data is not defined, one can still read bytecode.
self.setUp(magic=b'0000')
original_set_data = self.loader.__class__.set_data
try:
del self.loader.__class__.set_data
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
finally:
self.loader.__class__.set_data = original_set_data
def test_set_data_raises_exceptions(self):
# Raising NotImplementedError or IOError is okay for set_data.
def raise_exception(exc):
def closure(*args, **kwargs):
raise exc
return closure
self.setUp(magic=b'0000')
self.loader.set_data = raise_exception(NotImplementedError)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
class SourceLoaderGetSourceTests(unittest.TestCase):
"""Tests for importlib.abc.SourceLoader.get_source()."""
def test_default_encoding(self):
# Should have no problems with UTF-8 text.
name = 'mod'
mock = SourceOnlyLoaderMock('mod.file')
source = 'x = "ü"'
mock.source = source.encode('utf-8')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_decoded_source(self):
# Decoding should work.
name = 'mod'
mock = SourceOnlyLoaderMock("mod.file")
source = "# coding: Latin-1\nx='ü'"
assert source.encode('latin-1') != source.encode('utf-8')
mock.source = source.encode('latin-1')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_universal_newlines(self):
# PEP 302 says universal newlines should be used.
name = 'mod'
mock = SourceOnlyLoaderMock('mod.file')
source = "x = 42\r\ny = -13\r\n"
mock.source = source.encode('utf-8')
expect = io.IncrementalNewlineDecoder(None, True).decode(source)
self.assertEqual(mock.get_source(name), expect)
class AbstractMethodImplTests(unittest.TestCase):
"""Test the concrete abstractmethod implementations."""
class Loader(abc.Loader):
def load_module(self, fullname):
super().load_module(fullname)
class Finder(abc.Finder):
def find_module(self, _):
super().find_module(_)
class ResourceLoader(Loader, abc.ResourceLoader):
def get_data(self, _):
super().get_data(_)
class InspectLoader(Loader, abc.InspectLoader):
def is_package(self, _):
super().is_package(_)
def get_code(self, _):
super().get_code(_)
def get_source(self, _):
super().get_source(_)
class ExecutionLoader(InspectLoader, abc.ExecutionLoader):
def get_filename(self, _):
super().get_filename(_)
class SourceLoader(ResourceLoader, ExecutionLoader, abc.SourceLoader):
pass
class PyLoader(ResourceLoader, InspectLoader, abc.PyLoader):
def source_path(self, _):
super().source_path(_)
class PyPycLoader(PyLoader, abc.PyPycLoader):
def bytecode_path(self, _):
super().bytecode_path(_)
def source_mtime(self, _):
super().source_mtime(_)
def write_bytecode(self, _, _2):
super().write_bytecode(_, _2)
def raises_NotImplementedError(self, ins, *args):
for method_name in args:
method = getattr(ins, method_name)
arg_count = len(inspect.getfullargspec(method)[0]) - 1
args = [''] * arg_count
try:
method(*args)
except NotImplementedError:
pass
else:
msg = "{}.{} did not raise NotImplementedError"
self.fail(msg.format(ins.__class__.__name__, method_name))
def test_Loader(self):
self.raises_NotImplementedError(self.Loader(), 'load_module')
# XXX misplaced; should be somewhere else
def test_Finder(self):
self.raises_NotImplementedError(self.Finder(), 'find_module')
def test_ResourceLoader(self):
self.raises_NotImplementedError(self.ResourceLoader(), 'load_module',
'get_data')
def test_InspectLoader(self):
self.raises_NotImplementedError(self.InspectLoader(), 'load_module',
'is_package', 'get_code', 'get_source')
def test_ExecutionLoader(self):
self.raises_NotImplementedError(self.ExecutionLoader(), 'load_module',
'is_package', 'get_code', 'get_source',
'get_filename')
def test_SourceLoader(self):
ins = self.SourceLoader()
# Required abstractmethods.
self.raises_NotImplementedError(ins, 'get_filename', 'get_data')
# Optional abstractmethods.
        self.raises_NotImplementedError(ins, 'path_mtime', 'set_data')
def test_PyLoader(self):
self.raises_NotImplementedError(self.PyLoader(), 'source_path',
'get_data', 'is_package')
def test_PyPycLoader(self):
self.raises_NotImplementedError(self.PyPycLoader(), 'source_path',
'source_mtime', 'bytecode_path',
'write_bytecode')
def test_main():
from test.support import run_unittest
run_unittest(PyLoaderTests, PyLoaderCompatTests,
PyLoaderInterfaceTests,
PyPycLoaderTests, PyPycLoaderInterfaceTests,
SkipWritingBytecodeTests, RegeneratedBytecodeTests,
BadBytecodeFailureTests, MissingPathsTests,
SourceOnlyLoaderTests,
SourceLoaderBytecodeTests,
SourceLoaderGetSourceTests,
AbstractMethodImplTests)
if __name__ == '__main__':
test_main()
| mit |
cccfran/sympy | sympy/polys/tests/test_modulargcd.py | 125 | 9007 | from sympy.polys.rings import ring
from sympy.polys.domains import ZZ, QQ, AlgebraicField
from sympy.polys.modulargcd import (
modgcd_univariate,
modgcd_bivariate,
_chinese_remainder_reconstruction_multivariate,
modgcd_multivariate,
_to_ZZ_poly,
_to_ANP_poly,
func_field_modgcd,
_func_field_modgcd_m)
from sympy import sqrt
def test_modgcd_univariate_integers():
R, x = ring("x", ZZ)
f, g = R.zero, R.zero
assert modgcd_univariate(f, g) == (0, 0, 0)
f, g = R.zero, x
assert modgcd_univariate(f, g) == (x, 0, 1)
assert modgcd_univariate(g, f) == (x, 1, 0)
f, g = R.zero, -x
assert modgcd_univariate(f, g) == (x, 0, -1)
assert modgcd_univariate(g, f) == (x, -1, 0)
f, g = 2*x, R(2)
assert modgcd_univariate(f, g) == (2, x, 1)
f, g = 2*x + 2, 6*x**2 - 6
assert modgcd_univariate(f, g) == (2*x + 2, 1, 3*x - 3)
f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
g = x**3 + 6*x**2 + 11*x + 6
h = x**2 + 3*x + 2
cff = x**2 + 5*x + 4
cfg = x + 3
assert modgcd_univariate(f, g) == (h, cff, cfg)
f = x**4 - 4
g = x**4 + 4*x**2 + 4
h = x**2 + 2
cff = x**2 - 2
cfg = x**2 + 2
assert modgcd_univariate(f, g) == (h, cff, cfg)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert modgcd_univariate(f, g) == (h, cff, cfg)
f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \
+ 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \
+ 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \
+ 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \
- 12278371209708240950316872681744825481125965781519138077173235712*x**21 \
+ 289127344604779611146960547954288113529690984687482920704*x**14 \
+ 19007977035740498977629742919480623972236450681*x**7 \
+ 311973482284542371301330321821976049
g = 365431878023781158602430064717380211405897160759702125019136*x**21 \
+ 197599133478719444145775798221171663643171734081650688*x**14 \
- 9504116979659010018253915765478924103928886144*x**7 \
- 311973482284542371301330321821976049
assert modgcd_univariate(f, f.diff(x))[0] == g
f = 1317378933230047068160*x + 2945748836994210856960
g = 120352542776360960*x + 269116466014453760
h = 120352542776360960*x + 269116466014453760
cff = 10946
cfg = 1
assert modgcd_univariate(f, g) == (h, cff, cfg)
def test_modgcd_bivariate_integers():
R, x, y = ring("x,y", ZZ)
f, g = R.zero, R.zero
assert modgcd_bivariate(f, g) == (0, 0, 0)
f, g = 2*x, R(2)
assert modgcd_bivariate(f, g) == (2, x, 1)
f, g = x + 2*y, x + y
assert modgcd_bivariate(f, g) == (1, f, g)
f, g = x**2 + 2*x*y + y**2, x**3 + y**3
assert modgcd_bivariate(f, g) == (x + y, x + y, x**2 - x*y + y**2)
f, g = x*y**2 + 2*x*y + x, x*y**3 + x
assert modgcd_bivariate(f, g) == (x*y + x, y + 1, y**2 - y + 1)
f, g = x**2*y**2 + x**2*y + 1, x*y**2 + x*y + 1
assert modgcd_bivariate(f, g) == (1, f, g)
f = 2*x*y**2 + 4*x*y + 2*x + y**2 + 2*y + 1
g = 2*x*y**3 + 2*x + y**3 + 1
assert modgcd_bivariate(f, g) == (2*x*y + 2*x + y + 1, y + 1, y**2 - y + 1)
f, g = 2*x**2 + 4*x + 2, x + 1
assert modgcd_bivariate(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert modgcd_bivariate(f, g) == (x + 1, 1, 2*x + 2)
f = 2*x**2 + 4*x*y - 2*x - 4*y
g = x**2 + x - 2
assert modgcd_bivariate(f, g) == (x - 1, 2*x + 4*y, x + 2)
f = 2*x**2 + 2*x*y - 3*x - 3*y
g = 4*x*y - 2*x + 4*y**2 - 2*y
assert modgcd_bivariate(f, g) == (x + y, 2*x - 3, 4*y - 2)
def test_chinese_remainder():
R, x, y = ring("x, y", ZZ)
p, q = 3, 5
hp = x**3*y - x**2 - 1
hq = -x**3*y - 2*x*y**2 + 2
hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q)
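    # The CRT reconstruction must reduce back to hp modulo p and to hq modulo q.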
assert hpq.trunc_ground(p) == hp
assert hpq.trunc_ground(q) == hq
T, z = ring("z", R)
p, q = 3, 7
hp = (x*y + 1)*z**2 + x
hq = (x**2 - 3*y)*z + 2
hpq = _chinese_remainder_reconstruction_multivariate(hp, hq, p, q)
assert hpq.trunc_ground(p) == hp
assert hpq.trunc_ground(q) == hq
def test_modgcd_multivariate_integers():
R, x, y = ring("x,y", ZZ)
f, g = R.zero, R.zero
assert modgcd_multivariate(f, g) == (0, 0, 0)
f, g = 2*x**2 + 4*x + 2, x + 1
assert modgcd_multivariate(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert modgcd_multivariate(f, g) == (x + 1, 1, 2*x + 2)
f = 2*x**2 + 2*x*y - 3*x - 3*y
g = 4*x*y - 2*x + 4*y**2 - 2*y
assert modgcd_multivariate(f, g) == (x + y, 2*x - 3, 4*y - 2)
f, g = x*y**2 + 2*x*y + x, x*y**3 + x
assert modgcd_multivariate(f, g) == (x*y + x, y + 1, y**2 - y + 1)
f, g = x**2*y**2 + x**2*y + 1, x*y**2 + x*y + 1
assert modgcd_multivariate(f, g) == (1, f, g)
f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
g = x**3 + 6*x**2 + 11*x + 6
h = x**2 + 3*x + 2
cff = x**2 + 5*x + 4
cfg = x + 3
assert modgcd_multivariate(f, g) == (h, cff, cfg)
R, x, y, z, u = ring("x,y,z,u", ZZ)
f, g = x + y + z, -x - y - z - u
assert modgcd_multivariate(f, g) == (1, f, g)
f, g = u**2 + 2*u + 1, 2*u + 2
assert modgcd_multivariate(f, g) == (u + 1, u + 1, 2)
f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1
h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1
assert modgcd_multivariate(f, g) == (h, cff, cfg)
assert modgcd_multivariate(g, f) == (h, cfg, cff)
R, x, y, z = ring("x,y,z", ZZ)
f, g = x - y*z, x - y*z
assert modgcd_multivariate(f, g) == (x - y*z, 1, 1)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = R.fateman_poly_F_2()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
f, g, h = R.fateman_poly_F_3()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, t = ring("x,y,z,t", ZZ)
f, g, h = R.fateman_poly_F_3()
H, cff, cfg = modgcd_multivariate(f, g)
assert H == h and H*cff == f and H*cfg == g
def test_to_ZZ_ANP_poly():
A = AlgebraicField(QQ, sqrt(2))
R, x = ring("x", A)
f = x*(sqrt(2) + 1)
T, x_, z_ = ring("x_, z_", ZZ)
f_ = x_*z_ + x_
assert _to_ZZ_poly(f, T) == f_
assert _to_ANP_poly(f_, R) == f
R, x, t, s = ring("x, t, s", A)
f = x*t**2 + x*s + sqrt(2)
D, t_, s_ = ring("t_, s_", ZZ)
T, x_, z_ = ring("x_, z_", D)
f_ = (t_**2 + s_)*x_ + z_
assert _to_ZZ_poly(f, T) == f_
assert _to_ANP_poly(f_, R) == f
def test_modgcd_algebraic_field():
A = AlgebraicField(QQ, sqrt(2))
R, x = ring("x", A)
one = A.one
f, g = 2*x, R(2)
assert func_field_modgcd(f, g) == (one, f, g)
f, g = 2*x, R(sqrt(2))
assert func_field_modgcd(f, g) == (one, f, g)
f, g = 2*x + 2, 6*x**2 - 6
assert func_field_modgcd(f, g) == (x + 1, R(2), 6*x - 6)
R, x, y = ring("x, y", A)
f, g = x + sqrt(2)*y, x + y
assert func_field_modgcd(f, g) == (one, f, g)
f, g = x*y + sqrt(2)*y**2, R(sqrt(2))*y
assert func_field_modgcd(f, g) == (y, x + sqrt(2)*y, R(sqrt(2)))
f, g = x**2 + 2*sqrt(2)*x*y + 2*y**2, x + sqrt(2)*y
assert func_field_modgcd(f, g) == (g, g, one)
A = AlgebraicField(QQ, sqrt(2), sqrt(3))
R, x, y, z = ring("x, y, z", A)
h = x**2*y**7 + sqrt(6)/21*z
f, g = h*(27*y**3 + 1), h*(y + x)
assert func_field_modgcd(f, g) == (h, 27*y**3+1, y+x)
h = x**13*y**3 + 1/2*x**10 + 1/sqrt(2)
f, g = h*(x + 1), h*sqrt(2)/sqrt(3)
assert func_field_modgcd(f, g) == (h, x + 1, R(sqrt(2)/sqrt(3)))
A = AlgebraicField(QQ, sqrt(2)**(-1)*sqrt(3))
R, x = ring("x", A)
f, g = x + 1, x - 1
assert func_field_modgcd(f, g) == (A.one, f, g)
# when func_field_modgcd supports function fields, this test can be changed
def test_modgcd_func_field():
D, t = ring("t", ZZ)
R, x, z = ring("x, z", D)
minpoly = (z**2*t**2 + z**2*t - 1).drop(0)
f, g = x + 1, x - 1
assert _func_field_modgcd_m(f, g, minpoly) == R.one
| bsd-3-clause |
e-q/scipy | scipy/sparse/linalg/tests/test_pydata_sparse.py | 19 | 5954 | import pytest
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as splin
from numpy.testing import assert_allclose
try:
import sparse
except Exception:
sparse = None
pytestmark = pytest.mark.skipif(sparse is None,
reason="pydata/sparse not installed")
msg = "pydata/sparse (0.8) does not implement necessary operations"
sparse_params = [pytest.param("COO"),
pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)])]
scipy_sparse_classes = [
sp.bsr_matrix,
sp.csr_matrix,
sp.coo_matrix,
sp.csc_matrix,
sp.dia_matrix,
sp.dok_matrix
]
@pytest.fixture(params=sparse_params)
def sparse_cls(request):
return getattr(sparse, request.param)
@pytest.fixture(params=scipy_sparse_classes)
def sp_sparse_cls(request):
return request.param
@pytest.fixture
def same_matrix(sparse_cls, sp_sparse_cls):
np.random.seed(1234)
A_dense = np.random.rand(9, 9)
return sp_sparse_cls(A_dense), sparse_cls(A_dense)
@pytest.fixture
def matrices(sparse_cls):
np.random.seed(1234)
A_dense = np.random.rand(9, 9)
A_dense = A_dense @ A_dense.T
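    # A_dense @ A_dense.T is symmetric positive semidefinite, so the
    # symmetric solvers (eigsh, lobpcg) exercised below are applicable.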
A_sparse = sparse_cls(A_dense)
b = np.random.rand(9)
return A_dense, A_sparse, b
def test_isolve_gmres(matrices):
# Several of the iterative solvers use the same
# isolve.utils.make_system wrapper code, so test just one of them.
A_dense, A_sparse, b = matrices
x, info = splin.gmres(A_sparse, b, atol=1e-15)
assert info == 0
assert isinstance(x, np.ndarray)
assert_allclose(A_sparse @ x, b)
def test_lsmr(matrices):
A_dense, A_sparse, b = matrices
res0 = splin.lsmr(A_dense, b)
res = splin.lsmr(A_sparse, b)
assert_allclose(res[0], res0[0], atol=1.8e-5)
def test_lsqr(matrices):
A_dense, A_sparse, b = matrices
res0 = splin.lsqr(A_dense, b)
res = splin.lsqr(A_sparse, b)
assert_allclose(res[0], res0[0], atol=1e-5)
def test_eigs(matrices):
A_dense, A_sparse, v0 = matrices
M_dense = np.diag(v0**2)
M_sparse = A_sparse.__class__(M_dense)
w_dense, v_dense = splin.eigs(A_dense, k=3, v0=v0)
w, v = splin.eigs(A_sparse, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
for M in [M_sparse, M_dense]:
w_dense, v_dense = splin.eigs(A_dense, M=M_dense, k=3, v0=v0)
w, v = splin.eigs(A_sparse, M=M, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
w_dense, v_dense = splin.eigsh(A_dense, M=M_dense, k=3, v0=v0)
w, v = splin.eigsh(A_sparse, M=M, k=3, v0=v0)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
def test_svds(matrices):
A_dense, A_sparse, v0 = matrices
u0, s0, vt0 = splin.svds(A_dense, k=2, v0=v0)
u, s, vt = splin.svds(A_sparse, k=2, v0=v0)
assert_allclose(s, s0)
assert_allclose(u, u0)
assert_allclose(vt, vt0)
def test_lobpcg(matrices):
A_dense, A_sparse, x = matrices
X = x[:,None]
w_dense, v_dense = splin.lobpcg(A_dense, X)
w, v = splin.lobpcg(A_sparse, X)
assert_allclose(w, w_dense)
assert_allclose(v, v_dense)
def test_spsolve(matrices):
A_dense, A_sparse, b = matrices
b2 = np.random.rand(len(b), 3)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
x = splin.spsolve(A_sparse, b)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b)
x = splin.spsolve(A_sparse, b, use_umfpack=True)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense), b2)
x = splin.spsolve(A_sparse, b2)
assert isinstance(x, np.ndarray)
assert_allclose(x, x0)
x0 = splin.spsolve(sp.csc_matrix(A_dense),
sp.csc_matrix(A_dense))
x = splin.spsolve(A_sparse, A_sparse)
assert isinstance(x, type(A_sparse))
assert_allclose(x.todense(), x0.todense())
def test_splu(matrices):
A_dense, A_sparse, b = matrices
n = len(b)
sparse_cls = type(A_sparse)
lu = splin.splu(A_sparse)
assert isinstance(lu.L, sparse_cls)
assert isinstance(lu.U, sparse_cls)
Pr = sparse_cls(sp.csc_matrix((np.ones(n), (lu.perm_r, np.arange(n)))))
Pc = sparse_cls(sp.csc_matrix((np.ones(n), (np.arange(n), lu.perm_c))))
A2 = Pr.T @ lu.L @ lu.U @ Pc.T
assert_allclose(A2.todense(), A_sparse.todense())
z = lu.solve(A_sparse.todense())
assert_allclose(z, np.eye(n), atol=1e-10)
def test_spilu(matrices):
A_dense, A_sparse, b = matrices
sparse_cls = type(A_sparse)
lu = splin.spilu(A_sparse)
assert isinstance(lu.L, sparse_cls)
assert isinstance(lu.U, sparse_cls)
z = lu.solve(A_sparse.todense())
assert_allclose(z, np.eye(len(b)), atol=1e-3)
def test_spsolve_triangular(matrices):
A_dense, A_sparse, b = matrices
A_sparse = sparse.tril(A_sparse)
x = splin.spsolve_triangular(A_sparse, b)
assert_allclose(A_sparse @ x, b)
def test_onenormest(matrices):
A_dense, A_sparse, b = matrices
est0 = splin.onenormest(A_dense)
est = splin.onenormest(A_sparse)
assert_allclose(est, est0)
def test_inv(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.inv(sp.csc_matrix(A_dense))
x = splin.inv(A_sparse)
assert_allclose(x.todense(), x0.todense())
def test_expm(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.expm(sp.csc_matrix(A_dense))
x = splin.expm(A_sparse)
assert_allclose(x.todense(), x0.todense())
def test_expm_multiply(matrices):
A_dense, A_sparse, b = matrices
x0 = splin.expm_multiply(A_dense, b)
x = splin.expm_multiply(A_sparse, b)
assert_allclose(x, x0)
def test_eq(same_matrix):
sp_sparse, pd_sparse = same_matrix
assert (sp_sparse == pd_sparse).all()
def test_ne(same_matrix):
sp_sparse, pd_sparse = same_matrix
assert not (sp_sparse != pd_sparse).any()
| bsd-3-clause |
bright-sparks/chromium-spacewalk | chrome/browser/metrics/variations/generate_resources_map_unittest.py | 16 | 3094 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for generate_resources_map.py"""
import unittest
import generate_resources_map
class GenerateResourcesMapUnittest(unittest.TestCase):
TEST_INPUT = """
// This file is automatically generated by GRIT. Do not edit.
#pragma once
#define IDS_BOOKMARKS_NO_ITEMS 12500
#define IDS_BOOKMARK_BAR_IMPORT_LINK 12501
#define IDS_BOOKMARK_GROUP_FROM_IE 12502
#define IDS_BOOKMARK_GROUP_FROM_FIREFOX 12503
"""
def testGetResourceListFromString(self):
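    # Each tuple is (hash of the resource name, resource name, numeric id).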
expected_tuples = [(301430091, "IDS_BOOKMARKS_NO_ITEMS", "12500"),
(2654138887, "IDS_BOOKMARK_BAR_IMPORT_LINK", "12501"),
(2894469061, "IDS_BOOKMARK_GROUP_FROM_IE", "12502"),
(3847176170, "IDS_BOOKMARK_GROUP_FROM_FIREFOX", "12503")]
expected = [generate_resources_map.Resource(*t) for t in expected_tuples]
actual_tuples = generate_resources_map._GetResourceListFromString(
self.TEST_INPUT)
self.assertEqual(expected_tuples, actual_tuples)
def testCheckForHashCollisions(self):
collisions_tuples = [(123, "IDS_FOO", "12500"),
(456, "IDS_BAR", "12501"),
(456, "IDS_BAZ", "12502"),
(890, "IDS_QUX", "12503"),
(899, "IDS_NO", "12504"),
(899, "IDS_YES", "12505")]
list_with_collisions = [generate_resources_map.Resource(*t)
for t in collisions_tuples]
expected_collision_tuples = [(456, "IDS_BAR", "12501"),
(456, "IDS_BAZ", "12502"),
(899, "IDS_NO", "12504"),
(899, "IDS_YES", "12505")]
expected_collisions = [generate_resources_map.Resource(*t)
for t in expected_collision_tuples]
actual_collisions = sorted(
generate_resources_map._CheckForHashCollisions(list_with_collisions))
self.assertEqual(expected_collisions, actual_collisions)
def testGenerateFileContent(self):
expected = (
"""// This file was generated by generate_resources_map.py. Do not edit.
#include "chrome/browser/metrics/variations/generated_resources_map.h"
namespace chrome_variations {
const uint32_t kResourceHashes[] = {
301430091U, // IDS_BOOKMARKS_NO_ITEMS
2654138887U, // IDS_BOOKMARK_BAR_IMPORT_LINK
2894469061U, // IDS_BOOKMARK_GROUP_FROM_IE
3847176170U, // IDS_BOOKMARK_GROUP_FROM_FIREFOX
};
const int kResourceIndices[] = {
12500, // IDS_BOOKMARKS_NO_ITEMS
12501, // IDS_BOOKMARK_BAR_IMPORT_LINK
12502, // IDS_BOOKMARK_GROUP_FROM_IE
12503, // IDS_BOOKMARK_GROUP_FROM_FIREFOX
};
} // namespace chrome_variations
""")
actual = generate_resources_map._GenerateFileContent(self.TEST_INPUT)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
deshipu/micropython | tests/basics/int1.py | 46 | 1581 | print(int(False))
print(int(True))
print(int(0))
print(int(1))
print(int(+1))
print(int(-1))
print(int('0'))
print(int('+0'))
print(int('-0'))
print(int('1'))
print(int('+1'))
print(int('-1'))
print(int('01'))
print(int('9'))
print(int('10'))
print(int('+10'))
print(int('-10'))
print(int('12'))
print(int('-12'))
print(int('99'))
print(int('100'))
print(int('314'))
print(int(' 314'))
print(int('314 '))
print(int(' \t\t 314 \t\t '))
print(int(' 1 '))
print(int(' -3 '))
print(int('0', 10))
print(int('1', 10))
print(int(' \t 1 \t ', 10))
print(int('11', 10))
print(int('11', 16))
print(int('11', 8))
print(int('11', 2))
print(int('11', 36))
print(int('xyz', 36))
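# a base of 0 auto-detects the radix from the 0b/0o/0x prefix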
print(int('0o123', 0))
print(int('8388607'))
print(int('0x123', 16))
print(int('0X123', 16))
print(int('0A', 16))
print(int('0o123', 8))
print(int('0O123', 8))
print(int('0123', 8))
print(int('0b100', 2))
print(int('0B100', 2))
print(int('0100', 2))
print(int(' \t 0o12', 8))
print(int('0o12 \t ', 8))
print(int(b"12", 10))
print(int(b"12"))
def test(value, base):
try:
print(int(value, base))
except ValueError:
print('ValueError')
test('x', 0)
test('1x', 0)
test(' 1x', 0)
test(' 1' + chr(2) + ' ', 0)
test('', 0)
test(' ', 0)
test(' \t\t ', 0)
test('0x', 16)
test('0x', 0)
test('0o', 8)
test('0o', 0)
test('0b', 2)
test('0b', 0)
test('0b2', 2)
test('0o8', 8)
test('0xg', 16)
test('1 1', 16)
test('123', 37)
# check that we don't parse this as a floating point number
print(0x1e+1)
# can't convert list to int
try:
int([])
except TypeError:
print("TypeError")
| mit |
jayme-github/CouchPotatoServer | libs/enzyme/real.py | 180 | 4547 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <[email protected]>
# Copyright 2003-2006 Thomas Schueppel <[email protected]>
# Copyright 2003-2006 Dirk Meyer <[email protected]>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import struct
import logging
from exceptions import ParseError
import core
# http://www.pcisys.net/~melanson/codecs/rmff.htm
# http://www.pcisys.net/~melanson/codecs/
# get logging object
log = logging.getLogger(__name__)
class RealVideo(core.AVContainer):
def __init__(self, file):
core.AVContainer.__init__(self)
self.mime = 'video/real'
self.type = 'Real Video'
h = file.read(10)
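        # Every RMFF chunk starts with a 10-byte header: a 4-byte id,
        # a 32-bit size and a 16-bit version, all big-endian.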
try:
(object_id, object_size, object_version) = struct.unpack('>4sIH', h)
except struct.error:
# EOF.
raise ParseError()
if not object_id == '.RMF':
raise ParseError()
file_version, num_headers = struct.unpack('>II', file.read(8))
log.debug(u'size: %d, ver: %d, headers: %d' % \
(object_size, file_version, num_headers))
for _ in range(0, num_headers):
try:
oi = struct.unpack('>4sIH', file.read(10))
except (struct.error, IOError):
# Header data we expected wasn't there. File may be
# only partially complete.
break
if object_id == 'DATA' and oi[0] != 'INDX':
log.debug(u'INDX chunk expected after DATA but not found -- file corrupt')
break
(object_id, object_size, object_version) = oi
if object_id == 'DATA':
# Seek over the data chunk rather than reading it in.
file.seek(object_size - 10, 1)
else:
self._read_header(object_id, file.read(object_size - 10))
log.debug(u'%r [%d]' % (object_id, object_size - 10))
# Read all the following headers
def _read_header(self, object_id, s):
if object_id == 'PROP':
prop = struct.unpack('>9IHH', s)
log.debug(u'PROP: %r' % prop)
if object_id == 'MDPR':
mdpr = struct.unpack('>H7I', s[:30])
log.debug(u'MDPR: %r' % mdpr)
self.length = mdpr[7] / 1000.0
(stream_name_size,) = struct.unpack('>B', s[30:31])
stream_name = s[31:31 + stream_name_size]
pos = 31 + stream_name_size
(mime_type_size,) = struct.unpack('>B', s[pos:pos + 1])
mime = s[pos + 1:pos + 1 + mime_type_size]
pos += mime_type_size + 1
(type_specific_len,) = struct.unpack('>I', s[pos:pos + 4])
type_specific = s[pos + 4:pos + 4 + type_specific_len]
pos += 4 + type_specific_len
if mime[:5] == 'audio':
ai = core.AudioStream()
ai.id = mdpr[0]
ai.bitrate = mdpr[2]
self.audio.append(ai)
elif mime[:5] == 'video':
vi = core.VideoStream()
vi.id = mdpr[0]
vi.bitrate = mdpr[2]
self.video.append(vi)
else:
log.debug(u'Unknown: %r' % mime)
if object_id == 'CONT':
pos = 0
(title_len,) = struct.unpack('>H', s[pos:pos + 2])
self.title = s[2:title_len + 2]
pos += title_len + 2
(author_len,) = struct.unpack('>H', s[pos:pos + 2])
self.artist = s[pos + 2:pos + author_len + 2]
pos += author_len + 2
(copyright_len,) = struct.unpack('>H', s[pos:pos + 2])
self.copyright = s[pos + 2:pos + copyright_len + 2]
pos += copyright_len + 2
(comment_len,) = struct.unpack('>H', s[pos:pos + 2])
self.comment = s[pos + 2:pos + comment_len + 2]
Parser = RealVideo
| gpl-3.0 |
pwoodworth/intellij-community | python/helpers/docutils/parsers/rst/directives/html.py | 61 | 3223 | # $Id: html.py 4667 2006-07-12 21:40:56Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states
from docutils.transforms import components
class MetaBody(states.SpecializedBody):
class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
"""HTML-specific "meta" element."""
pass
def field_marker(self, match, context, next_state):
"""Meta element."""
node, blank_finish = self.parsemeta(match)
self.parent += node
return [], next_state, []
def parsemeta(self, match):
name = self.parse_field_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
node = self.meta()
pending = nodes.pending(components.Filter,
{'component': 'writer',
'format': 'html',
'nodes': [node]})
node['content'] = ' '.join(indented)
if not indented:
line = self.state_machine.line
msg = self.reporter.info(
'No content for meta tag "%s".' % name,
nodes.literal_block(line, line),
line=self.state_machine.abs_line_number())
return msg, blank_finish
tokens = name.split()
try:
attname, val = utils.extract_name_value(tokens[0])[0]
node[attname.lower()] = val
except utils.NameValueError:
node['name'] = tokens[0]
for token in tokens[1:]:
try:
attname, val = utils.extract_name_value(token)[0]
node[attname.lower()] = val
except utils.NameValueError, detail:
line = self.state_machine.line
msg = self.reporter.error(
'Error parsing meta tag attribute "%s": %s.'
% (token, detail), nodes.literal_block(line, line),
line=self.state_machine.abs_line_number())
return msg, blank_finish
self.document.note_pending(pending)
return pending, blank_finish
class Meta(Directive):
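    # Typical reST input handled by this directive (illustrative example)::
    #
    #   .. meta::
    #      :description: The reStructuredText plaintext markup language
    #      :keywords: plaintext, markup language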
has_content = True
SMkwargs = {'state_classes': (MetaBody,)}
def run(self):
self.assert_has_content()
node = nodes.Element()
new_line_offset, blank_finish = self.state.nested_list_parse(
self.content, self.content_offset, node,
initial_state='MetaBody', blank_finish=1,
state_machine_kwargs=self.SMkwargs)
if (new_line_offset - self.content_offset) != len(self.content):
# incomplete parse of block?
error = self.state_machine.reporter.error(
'Invalid meta directive.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
node += error
return node.children
| apache-2.0 |
cbertinato/pandas | pandas/tests/frame/test_axis_select_reindex.py | 1 | 44030 | from datetime import datetime
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
class TestDataFrameSelectReindex(TestData):
# These are specific reindex-based tests; other indexing tests should go in
# test_indexing
def test_drop_names(self):
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
df.index.name, df.columns.name = 'first', 'second'
df_dropped_b = df.drop('b')
df_dropped_e = df.drop('e', axis=1)
df_inplace_b, df_inplace_e = df.copy(), df.copy()
df_inplace_b.drop('b', inplace=True)
df_inplace_e.drop('e', axis=1, inplace=True)
for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):
assert obj.index.name == 'first'
assert obj.columns.name == 'second'
assert list(df.columns) == ['d', 'e', 'f']
msg = r"\['g'\] not found in axis"
with pytest.raises(KeyError, match=msg):
df.drop(['g'])
with pytest.raises(KeyError, match=msg):
df.drop(['g'], 1)
# errors = 'ignore'
dropped = df.drop(['g'], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['b', 'g'], errors='ignore')
expected = Index(['a', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
dropped = df.drop(['g'], axis=1, errors='ignore')
expected = Index(['d', 'e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
dropped = df.drop(['d', 'g'], axis=1, errors='ignore')
expected = Index(['e', 'f'], name='second')
tm.assert_index_equal(dropped.columns, expected)
# GH 16398
dropped = df.drop([], errors='ignore')
expected = Index(['a', 'b', 'c'], name='first')
tm.assert_index_equal(dropped.index, expected)
def test_drop_col_still_multiindex(self):
arrays = [['a', 'b', 'c', 'top'],
['', '', '', 'OD'],
['', '', '', 'wx']]
tuples = sorted(zip(*arrays))
index = MultiIndex.from_tuples(tuples)
df = DataFrame(np.random.randn(3, 4), columns=index)
del df[('a', '', '')]
assert(isinstance(df.columns, MultiIndex))
def test_drop(self):
simple = DataFrame({"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]})
assert_frame_equal(simple.drop("A", axis=1), simple[['B']])
assert_frame_equal(simple.drop(["A", "B"], axis='columns'),
simple[[]])
assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
assert_frame_equal(simple.drop(
[0, 3], axis='index'), simple.loc[[1, 2], :])
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
simple.drop(5)
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
simple.drop('C', 1)
with pytest.raises(KeyError, match=r"\[5\] not found in axis"):
simple.drop([1, 5])
with pytest.raises(KeyError, match=r"\['C'\] not found in axis"):
simple.drop(['A', 'C'], 1)
# errors = 'ignore'
assert_frame_equal(simple.drop(5, errors='ignore'), simple)
assert_frame_equal(simple.drop([0, 5], errors='ignore'),
simple.loc[[1, 2, 3], :])
assert_frame_equal(simple.drop('C', axis=1, errors='ignore'), simple)
assert_frame_equal(simple.drop(['A', 'C'], axis=1, errors='ignore'),
simple[['B']])
# non-unique - wheee!
nu_df = DataFrame(list(zip(range(3), range(-3, 1), list('abc'))),
columns=['a', 'a', 'b'])
assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])
assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])
assert_frame_equal(nu_df.drop([]), nu_df) # GH 16398
nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))
nu_df.columns = list('abc')
assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.loc[["Y"], :])
assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.loc[[], :])
# inplace cache issue
# GH 5628
df = pd.DataFrame(np.random.randn(10, 3), columns=list('abc'))
expected = df[~(df.b > 0)]
df.drop(labels=df[df.b > 0].index, inplace=True)
assert_frame_equal(df, expected)
def test_drop_multiindex_not_lexsorted(self):
# GH 11640
# define the lexsorted version
lexsorted_mi = MultiIndex.from_tuples(
[('a', ''), ('b1', 'c1'), ('b2', 'c2')], names=['b', 'c'])
lexsorted_df = DataFrame([[1, 3, 4]], columns=lexsorted_mi)
assert lexsorted_df.columns.is_lexsorted()
# define the non-lexsorted version
not_lexsorted_df = DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3],
[1, 'b2', 'c2', 4]])
not_lexsorted_df = not_lexsorted_df.pivot_table(
index='a', columns=['b', 'c'], values='d')
not_lexsorted_df = not_lexsorted_df.reset_index()
assert not not_lexsorted_df.columns.is_lexsorted()
# compare the results
tm.assert_frame_equal(lexsorted_df, not_lexsorted_df)
expected = lexsorted_df.drop('a', axis=1)
with tm.assert_produces_warning(PerformanceWarning):
result = not_lexsorted_df.drop('a', axis=1)
tm.assert_frame_equal(result, expected)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's (GH12392)
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.drop('a')
res2 = df.drop(index='a')
tm.assert_frame_equal(res1, res2)
res1 = df.drop('d', 1)
res2 = df.drop(columns='d')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(labels='e', axis=1)
res2 = df.drop(columns='e')
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0)
res2 = df.drop(index=['a'])
tm.assert_frame_equal(res1, res2)
res1 = df.drop(['a'], axis=0).drop(['d'], axis=1)
res2 = df.drop(index=['a'], columns=['d'])
tm.assert_frame_equal(res1, res2)
with pytest.raises(ValueError):
df.drop(labels='a', index='b')
with pytest.raises(ValueError):
df.drop(labels='a', columns='b')
with pytest.raises(ValueError):
df.drop(axis=1)
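    # Sketch of the same equivalence in user code (labels are illustrative only):
    #   df.drop('a')                        <=> df.drop(index='a')
    #   df.drop('d', axis=1)                <=> df.drop(columns='d')
    #   df.drop(index=['a'], columns=['d']) combines both in a single call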
def test_merge_join_different_levels(self):
# GH 9455
# first dataframe
df1 = DataFrame(columns=['a', 'b'], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([('a', ''), ('c', 'c1')])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
columns = ['a', 'b', ('c', 'c1')]
expected = DataFrame(columns=columns, data=[[1, 11, 33], [0, 22, 44]])
with tm.assert_produces_warning(UserWarning):
result = pd.merge(df1, df2, on='a')
tm.assert_frame_equal(result, expected)
# join, see discussion in GH 12219
columns = ['a', 'b', ('a', ''), ('c', 'c1')]
expected = DataFrame(columns=columns,
data=[[1, 11, 0, 44], [0, 22, 1, 33]])
with tm.assert_produces_warning(UserWarning):
result = df1.join(df2, on='a')
tm.assert_frame_equal(result, expected)
def test_reindex(self):
newFrame = self.frame.reindex(self.ts1.index)
for col in newFrame.columns:
for idx, val in newFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in newFrame.items():
assert tm.equalContents(series.index, newFrame.index)
emptyFrame = self.frame.reindex(Index([]))
assert len(emptyFrame.index) == 0
# Cython code should be unit-tested directly
nonContigFrame = self.frame.reindex(self.ts1.index[::2])
for col in nonContigFrame.columns:
for idx, val in nonContigFrame[col].items():
if idx in self.frame.index:
if np.isnan(val):
assert np.isnan(self.frame[col][idx])
else:
assert val == self.frame[col][idx]
else:
assert np.isnan(val)
for col, series in nonContigFrame.items():
assert tm.equalContents(series.index, nonContigFrame.index)
# corner cases
# Same index, copies values but not index if copy=False
newFrame = self.frame.reindex(self.frame.index, copy=False)
assert newFrame.index is self.frame.index
# length zero
newFrame = self.frame.reindex([])
assert newFrame.empty
assert len(newFrame.columns) == len(self.frame.columns)
# length zero with columns reindexed with non-empty index
newFrame = self.frame.reindex([])
newFrame = newFrame.reindex(self.frame.index)
assert len(newFrame.index) == len(self.frame.index)
assert len(newFrame.columns) == len(self.frame.columns)
# pass non-Index
newFrame = self.frame.reindex(list(self.ts1.index))
tm.assert_index_equal(newFrame.index, self.ts1.index)
# copy with no axes
result = self.frame.reindex()
assert_frame_equal(result, self.frame)
assert result is not self.frame
def test_reindex_nan(self):
df = pd.DataFrame([[1, 2], [3, 5], [7, 11], [9, 23]],
index=[2, np.nan, 1, 5],
columns=['joe', 'jim'])
i, j = [np.nan, 5, 5, np.nan, 1, 2, np.nan], [1, 3, 3, 1, 2, 0, 1]
assert_frame_equal(df.reindex(i), df.iloc[j])
df.index = df.index.astype('object')
assert_frame_equal(df.reindex(i), df.iloc[j], check_index_type=False)
# GH10388
df = pd.DataFrame({'other': ['a', 'b', np.nan, 'c'],
'date': ['2015-03-22', np.nan,
'2012-01-08', np.nan],
'amount': [2, 3, 4, 5]})
df['date'] = pd.to_datetime(df.date)
df['delta'] = (pd.to_datetime('2015-06-18') - df['date']).shift(1)
left = df.set_index(['delta', 'other', 'date']).reset_index()
right = df.reindex(columns=['delta', 'other', 'date', 'amount'])
assert_frame_equal(left, right)
def test_reindex_name_remains(self):
s = Series(np.random.rand(10))
df = DataFrame(s, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(i)
assert df.index.name == 'iname'
df = df.reindex(Index(np.arange(10), name='tmpname'))
assert df.index.name == 'tmpname'
s = Series(np.random.rand(10))
df = DataFrame(s.T, index=np.arange(len(s)))
i = Series(np.arange(10), name='iname')
df = df.reindex(columns=i)
assert df.columns.name == 'iname'
def test_reindex_int(self):
smaller = self.intframe.reindex(self.intframe.index[::2])
assert smaller['A'].dtype == np.int64
bigger = smaller.reindex(self.intframe.index)
assert bigger['A'].dtype == np.float64
smaller = self.intframe.reindex(columns=['A', 'B'])
assert smaller['A'].dtype == np.int64
def test_reindex_like(self):
other = self.frame.reindex(index=self.frame.index[:10],
columns=['C', 'B'])
assert_frame_equal(other, self.frame.reindex_like(other))
def test_reindex_columns(self):
new_frame = self.frame.reindex(columns=['A', 'B', 'E'])
tm.assert_series_equal(new_frame['B'], self.frame['B'])
assert np.isnan(new_frame['E']).all()
assert 'C' not in new_frame
# Length zero
new_frame = self.frame.reindex(columns=[])
assert new_frame.empty
def test_reindex_columns_method(self):
# GH 14992, reindexing over columns ignored method
df = DataFrame(data=[[11, 12, 13], [21, 22, 23], [31, 32, 33]],
index=[1, 2, 4],
columns=[1, 2, 4],
dtype=float)
# default method
result = df.reindex(columns=range(6))
expected = DataFrame(data=[[np.nan, 11, 12, np.nan, 13, np.nan],
[np.nan, 21, 22, np.nan, 23, np.nan],
[np.nan, 31, 32, np.nan, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='ffill'
result = df.reindex(columns=range(6), method='ffill')
expected = DataFrame(data=[[np.nan, 11, 12, 12, 13, 13],
[np.nan, 21, 22, 22, 23, 23],
[np.nan, 31, 32, 32, 33, 33]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
# method='bfill'
result = df.reindex(columns=range(6), method='bfill')
expected = DataFrame(data=[[11, 11, 12, 13, 13, np.nan],
[21, 21, 22, 23, 23, np.nan],
[31, 31, 32, 33, 33, np.nan]],
index=[1, 2, 4],
columns=range(6),
dtype=float)
assert_frame_equal(result, expected)
def test_reindex_axes(self):
# GH 3317, reindexing by both axes loses freq of the index
df = DataFrame(np.ones((3, 3)),
index=[datetime(2012, 1, 1),
datetime(2012, 1, 2),
datetime(2012, 1, 3)],
columns=['a', 'b', 'c'])
time_freq = date_range('2012-01-01', '2012-01-03', freq='d')
some_cols = ['a', 'b']
index_freq = df.reindex(index=time_freq).index.freq
both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq
seq_freq = df.reindex(index=time_freq).reindex(
columns=some_cols).index.freq
assert index_freq == both_freq
assert index_freq == seq_freq
def test_reindex_fill_value(self):
df = DataFrame(np.random.randn(10, 4))
# axis=0
result = df.reindex(list(range(15)))
assert np.isnan(result.values[-5:]).all()
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
# axis=1
result = df.reindex(columns=range(5), fill_value=0.)
expected = df.copy()
expected[4] = 0.
assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value=0)
expected = df.copy()
expected[4] = 0
assert_frame_equal(result, expected)
result = df.reindex(columns=range(5), fill_value='foo')
expected = df.copy()
expected[4] = 'foo'
assert_frame_equal(result, expected)
# reindex_axis
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(range(15), fill_value=0., axis=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.reindex_axis(range(5), fill_value=0., axis=1)
expected = df.reindex(columns=range(5)).fillna(0)
assert_frame_equal(result, expected)
# other dtypes
df['foo'] = 'foo'
result = df.reindex(range(15), fill_value=0)
expected = df.reindex(range(15)).fillna(0)
assert_frame_equal(result, expected)
def test_reindex_dups(self):
# GH4746, reindex on duplicate index error messages
arr = np.random.randn(10)
df = DataFrame(arr, index=[1, 2, 3, 4, 5, 1, 2, 3, 4, 5])
# set index is ok
result = df.copy()
result.index = list(range(len(df)))
expected = DataFrame(arr, index=list(range(len(df))))
assert_frame_equal(result, expected)
# reindex fails
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df.reindex(index=list(range(len(df))))
def test_reindex_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1, 2, np.nan], "B": [4, 5, np.nan]},
index=[0, 1, 3])
result = df.reindex([0, 1, 3])
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis=0)
assert_frame_equal(result, expected)
result = df.reindex([0, 1, 3], axis='index')
assert_frame_equal(result, expected)
def test_reindex_positional_warns(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
expected = pd.DataFrame({"A": [1., 2], 'B': [4., 5],
"C": [np.nan, np.nan]})
with tm.assert_produces_warning(FutureWarning):
result = df.reindex([0, 1], ['A', 'B', 'C'])
assert_frame_equal(result, expected)
def test_reindex_axis_style_raises(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], 'B': [4, 5, 6]})
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis=1)
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex([0, 1], ['A'], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], columns=[0, 1], axis='columns')
with pytest.raises(TypeError, match='Cannot specify all'):
df.reindex([0, 1], [0], ['A'])
# Mixing styles
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='index')
with pytest.raises(TypeError, match="Cannot specify both 'axis'"):
df.reindex(index=[0, 1], axis='columns')
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.reindex([0, 1], labels=[0, 1])
def test_reindex_single_named_indexer(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = pd.DataFrame({"A": [1, 2, 3], "B": [1, 2, 3]})
result = df.reindex([0, 1], columns=['A'])
expected = pd.DataFrame({"A": [1, 2]})
assert_frame_equal(result, expected)
def test_reindex_api_equivalence(self):
# https://github.com/pandas-dev/pandas/issues/12392
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
with tm.assert_produces_warning(FutureWarning) as m:
res1 = df.reindex(['b', 'a'], ['e', 'd'])
assert 'reindex' in str(m[0].message)
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_align(self):
af, bf = self.frame.align(self.frame)
assert af._data is not self.frame._data
af, bf = self.frame.align(self.frame, copy=False)
assert af._data is self.frame._data
# axis = 0
other = self.frame.iloc[:-5, :3]
af, bf = self.frame.align(other, axis=0, fill_value=-1)
tm.assert_index_equal(bf.columns, other.columns)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
diff_b_vals = bf.reindex(diff_b).values
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='right', axis=0)
tm.assert_index_equal(bf.columns, other.columns)
tm.assert_index_equal(bf.index, other.index)
tm.assert_index_equal(af.index, other.index)
# axis = 1
other = self.frame.iloc[:-5, :3].copy()
af, bf = self.frame.align(other, axis=1)
tm.assert_index_equal(bf.columns, self.frame.columns)
tm.assert_index_equal(bf.index, other.index)
# test fill value
join_idx = self.frame.index.join(other.index)
diff_a = self.frame.index.difference(join_idx)
diff_b = other.index.difference(join_idx)
diff_a_vals = af.reindex(diff_a).values
# TODO(wesm): unused?
diff_b_vals = bf.reindex(diff_b).values # noqa
assert (diff_a_vals == -1).all()
af, bf = self.frame.align(other, join='inner', axis=1)
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.frame.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
# test other non-float types
af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, other.columns)
af, bf = self.mixed_frame.align(self.mixed_frame,
join='inner', axis=1, method='pad')
tm.assert_index_equal(bf.columns, self.mixed_frame.columns)
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=None)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.frame.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# mixed floats/ints
af, bf = self.mixed_float.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
af, bf = self.mixed_int.align(other.iloc[:, 0], join='inner', axis=1,
method=None, fill_value=0)
tm.assert_index_equal(bf.index, Index([]))
# Try to align DataFrame to Series along bad axis
with pytest.raises(ValueError):
self.frame.align(af.iloc[0, :3], join='inner', axis=2)
# align dataframe to series with broadcast or not
idx = self.frame.index
s = Series(range(len(idx)), index=idx)
left, right = self.frame.align(s, axis=0)
tm.assert_index_equal(left.index, self.frame.index)
tm.assert_index_equal(right.index, self.frame.index)
assert isinstance(right, Series)
left, right = self.frame.align(s, broadcast_axis=1)
tm.assert_index_equal(left.index, self.frame.index)
expected = {c: s for c in self.frame.columns}
expected = DataFrame(expected, index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(right, expected)
# see gh-9558
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
result = df[df['a'] == 2]
expected = DataFrame([[2, 5]], index=[1], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
result = df.where(df['a'] == 2, 0)
expected = DataFrame({'a': [0, 2, 0], 'b': [0, 5, 0]})
tm.assert_frame_equal(result, expected)
def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):
aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,
fill_axis=fill_axis)
join_index, join_columns = None, None
ea, eb = a, b
if axis is None or axis == 0:
join_index = a.index.join(b.index, how=how)
ea = ea.reindex(index=join_index)
eb = eb.reindex(index=join_index)
if axis is None or axis == 1:
join_columns = a.columns.join(b.columns, how=how)
ea = ea.reindex(columns=join_columns)
eb = eb.reindex(columns=join_columns)
ea = ea.fillna(axis=fill_axis, method=method, limit=limit)
eb = eb.fillna(axis=fill_axis, method=method, limit=limit)
assert_frame_equal(aa, ea)
assert_frame_equal(ab, eb)
@pytest.mark.parametrize('meth', ['pad', 'bfill'])
@pytest.mark.parametrize('ax', [0, 1, None])
@pytest.mark.parametrize('fax', [0, 1])
@pytest.mark.parametrize('how', ['inner', 'outer', 'left', 'right'])
def test_align_fill_method(self, how, meth, ax, fax):
self._check_align_fill(how, meth, ax, fax)
def _check_align_fill(self, kind, meth, ax, fax):
left = self.frame.iloc[0:4, :10]
right = self.frame.iloc[2:, 6:]
empty = self.frame.iloc[:0, :0]
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty left
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, right, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# empty right
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(left, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
# both empty
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth)
self._check_align(empty, empty, axis=ax, fill_axis=fax,
how=kind, method=meth, limit=1)
def test_align_int_fill_bug(self):
# GH #910
X = np.arange(10 * 10, dtype='float64').reshape(10, 10)
Y = np.ones((10, 1), dtype=int)
df1 = DataFrame(X)
df1['0.X'] = Y.squeeze()
df2 = df1.astype(float)
result = df1 - df1.mean()
expected = df2 - df2.mean()
assert_frame_equal(result, expected)
def test_align_multiindex(self):
# GH 10665
# same test cases as test_align_multiindex in test_series.py
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
df1 = pd.DataFrame(np.arange(12, dtype='int64'), index=midx)
df2 = pd.DataFrame(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = df1.align(df2, join='left')
res2l, res2r = df2.align(df1, join='right')
expl = df1
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
res1l, res1r = df1.align(df2, join='right')
res2l, res2r = df2.align(df1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
assert_frame_equal(expl, res1l)
assert_frame_equal(expl, res2r)
expr = pd.DataFrame([0, 0, 1, 1] * 2, index=exp_idx)
assert_frame_equal(expr, res1r)
assert_frame_equal(expr, res2l)
def test_align_series_combinations(self):
df = pd.DataFrame({'a': [1, 3, 5],
'b': [1, 3, 5]}, index=list('ACE'))
s = pd.Series([1, 2, 4], index=list('ABD'), name='x')
# frame + series
res1, res2 = df.align(s, axis=0)
exp1 = pd.DataFrame({'a': [1, np.nan, 3, np.nan, 5],
'b': [1, np.nan, 3, np.nan, 5]},
index=list('ABCDE'))
exp2 = pd.Series([1, 2, np.nan, 4, np.nan],
index=list('ABCDE'), name='x')
tm.assert_frame_equal(res1, exp1)
tm.assert_series_equal(res2, exp2)
# series + frame
res1, res2 = s.align(df)
tm.assert_series_equal(res1, exp2)
tm.assert_frame_equal(res2, exp1)
def test_filter(self):
# Items
filtered = self.frame.filter(['A', 'B', 'E'])
assert len(filtered.columns) == 2
assert 'E' not in filtered
filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')
assert len(filtered.columns) == 2
assert 'E' not in filtered
# Other axis
idx = self.frame.index[0:4]
filtered = self.frame.filter(idx, axis='index')
expected = self.frame.reindex(index=idx)
tm.assert_frame_equal(filtered, expected)
# like
fcopy = self.frame.copy()
fcopy['AA'] = 1
filtered = fcopy.filter(like='A')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# like with ints in column names
df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])
filtered = df.filter(like='_')
assert len(filtered.columns) == 2
# regex with ints in column names
# from PR #10384
df = DataFrame(0., index=[0, 1, 2], columns=['A1', 1, 'B', 2, 'C'])
expected = DataFrame(
0., index=[0, 1, 2], columns=pd.Index([1, 2], dtype=object))
filtered = df.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
expected = DataFrame(0., index=[0, 1, 2], columns=[0, '0', 1, '1'])
# shouldn't remove anything
filtered = expected.filter(regex='^[0-9]+$')
tm.assert_frame_equal(filtered, expected)
# pass in None
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter()
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(items=None)
with pytest.raises(TypeError, match='Must pass'):
self.frame.filter(axis=1)
# test mutually exclusive arguments
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', like='bbi')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$', axis=1)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], regex='e$')
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi', axis=0)
with pytest.raises(TypeError, match='mutually exclusive'):
self.frame.filter(items=['one', 'three'], like='bbi')
# objects
filtered = self.mixed_frame.filter(like='foo')
assert 'foo' in filtered
# unicode columns, won't ascii-encode
df = self.frame.rename(columns={'B': '\u2202'})
filtered = df.filter(like='C')
assert 'C' in filtered
def test_filter_regex_search(self):
fcopy = self.frame.copy()
fcopy['AA'] = 1
# regex
filtered = fcopy.filter(regex='[A]+')
assert len(filtered.columns) == 2
assert 'AA' in filtered
# doesn't have to be at beginning
df = DataFrame({'aBBa': [1, 2],
'BBaBB': [1, 2],
'aCCa': [1, 2],
'aCCaBB': [1, 2]})
result = df.filter(regex='BB')
exp = df[[x for x in df.columns if 'BB' in x]]
assert_frame_equal(result, exp)
@pytest.mark.parametrize('name,expected', [
('a', DataFrame({'a': [1, 2]})),
('a', DataFrame({'a': [1, 2]})),
('あ', DataFrame({'あ': [3, 4]}))
])
def test_filter_unicode(self, name, expected):
# GH13101
df = DataFrame({'a': [1, 2], 'あ': [3, 4]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
@pytest.mark.parametrize('name', ['a', 'a'])
def test_filter_bytestring(self, name):
# GH13101
df = DataFrame({b'a': [1, 2], b'b': [3, 4]})
expected = DataFrame({b'a': [1, 2]})
assert_frame_equal(df.filter(like=name), expected)
assert_frame_equal(df.filter(regex=name), expected)
def test_filter_corner(self):
empty = DataFrame()
result = empty.filter([])
assert_frame_equal(result, empty)
result = empty.filter(like='foo')
assert_frame_equal(result, empty)
def test_take(self):
# homogeneous
order = [3, 1, 2, 0]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['D', 'B', 'C', 'A']]
assert_frame_equal(result, expected, check_names=False)
# negative indices
order = [2, 1, -1]
for df in [self.frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=True, axis=0)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result = df.take(order, convert=False, axis=0)
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['C', 'B', 'D']]
assert_frame_equal(result, expected, check_names=False)
# illegal indices
msg = "indices are out-of-bounds"
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 30], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -31], axis=0)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, 5], axis=1)
with pytest.raises(IndexError, match=msg):
df.take([3, 1, 2, -5], axis=1)
# mixed-dtype
order = [4, 1, 2, 0, 3]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
# negative indices
order = [4, 1, -2]
for df in [self.mixed_frame]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['foo', 'B', 'D']]
assert_frame_equal(result, expected)
# by dtype
order = [1, 2, 0, 3]
for df in [self.mixed_float, self.mixed_int]:
result = df.take(order, axis=0)
expected = df.reindex(df.index.take(order))
assert_frame_equal(result, expected)
# axis = 1
result = df.take(order, axis=1)
expected = df.loc[:, ['B', 'C', 'A', 'D']]
assert_frame_equal(result, expected)
def test_reindex_boolean(self):
frame = DataFrame(np.ones((10, 2), dtype=bool),
index=np.arange(0, 20, 2),
columns=[0, 2])
reindexed = frame.reindex(np.arange(10))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[0][1])
reindexed = frame.reindex(columns=range(3))
assert reindexed.values.dtype == np.object_
assert isna(reindexed[1]).all()
def test_reindex_objects(self):
reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])
assert 'foo' in reindexed
reindexed = self.mixed_frame.reindex(columns=['A', 'B'])
assert 'foo' not in reindexed
def test_reindex_corner(self):
index = Index(['a', 'b', 'c'])
dm = self.empty.reindex(index=[1, 2, 3])
reindexed = dm.reindex(columns=index)
tm.assert_index_equal(reindexed.columns, index)
# ints are weird
smaller = self.intframe.reindex(columns=['A', 'B', 'E'])
assert smaller['E'].dtype == np.float64
def test_reindex_axis(self):
cols = ['A', 'B', 'E']
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(columns=cols)
assert_frame_equal(reindexed1, reindexed2)
rows = self.intframe.index[0:5]
with tm.assert_produces_warning(FutureWarning) as m:
reindexed1 = self.intframe.reindex_axis(rows, axis=0)
assert 'reindex' in str(m[0].message)
reindexed2 = self.intframe.reindex(index=rows)
assert_frame_equal(reindexed1, reindexed2)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
self.intframe.reindex_axis(rows, axis=2)
# no-op case
cols = self.frame.columns.copy()
with tm.assert_produces_warning(FutureWarning) as m:
newFrame = self.frame.reindex_axis(cols, axis=1)
assert 'reindex' in str(m[0].message)
assert_frame_equal(newFrame, self.frame)
def test_reindex_with_nans(self):
df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],
columns=['a', 'b'],
index=[100.0, 101.0, np.nan, 102.0, 103.0])
result = df.reindex(index=[101.0, 102.0, 103.0])
expected = df.iloc[[1, 3, 4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[103.0])
expected = df.iloc[[4]]
assert_frame_equal(result, expected)
result = df.reindex(index=[101.0])
expected = df.iloc[[1]]
assert_frame_equal(result, expected)
def test_reindex_multi(self):
df = DataFrame(np.random.randn(3, 3))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(4), columns=range(4))
expected = df.reindex(list(range(4))).reindex(columns=range(4))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randint(0, 10, (3, 3)))
result = df.reindex(index=range(2), columns=range(2))
expected = df.reindex(range(2)).reindex(columns=range(2))
assert_frame_equal(result, expected)
df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])
result = df.reindex(index=[0, 1], columns=['a', 'b'])
expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])
assert_frame_equal(result, expected)
def test_reindex_multi_categorical_time(self):
# https://github.com/pandas-dev/pandas/issues/21390
midx = pd.MultiIndex.from_product(
[Categorical(['a', 'b', 'c']),
Categorical(date_range("2012-01-01", periods=3, freq='H'))])
df = pd.DataFrame({'a': range(len(midx))}, index=midx)
df2 = df.iloc[[0, 1, 2, 3, 4, 5, 6, 8]]
result = df2.reindex(midx)
expected = pd.DataFrame(
{'a': [0, 1, 2, 3, 4, 5, 6, np.nan, 8]}, index=midx)
assert_frame_equal(result, expected)
data = [[1, 2, 3], [1, 2, 3]]
@pytest.mark.parametrize('actual', [
DataFrame(data=data, index=['a', 'a']),
DataFrame(data=data, index=['a', 'b']),
DataFrame(data=data, index=['a', 'b']).set_index([0, 1]),
DataFrame(data=data, index=['a', 'a']).set_index([0, 1])
])
def test_raise_on_drop_duplicate_index(self, actual):
# issue 19186
level = 0 if isinstance(actual.index, MultiIndex) else None
with pytest.raises(KeyError):
actual.drop('c', level=level, axis=0)
with pytest.raises(KeyError):
actual.T.drop('c', level=level, axis=1)
expected_no_err = actual.drop('c', axis=0, level=level,
errors='ignore')
assert_frame_equal(expected_no_err, actual)
expected_no_err = actual.T.drop('c', axis=1, level=level,
errors='ignore')
assert_frame_equal(expected_no_err.T, actual)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 2]])
@pytest.mark.parametrize('drop_labels', [[], [1], [2]])
def test_drop_empty_list(self, index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
frame = pd.DataFrame(index=index).drop(drop_labels)
tm.assert_frame_equal(frame, pd.DataFrame(index=expected_index))
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 2, 2]])
@pytest.mark.parametrize('drop_labels', [[1, 4], [4, 5]])
def test_drop_non_empty_list(self, index, drop_labels):
# GH 21494
with pytest.raises(KeyError, match='not found in axis'):
pd.DataFrame(index=index).drop(drop_labels)
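# Minimal standalone sketch of the reindex/drop keyword API exercised above.
# The frame and labels are illustrative values only; it needs nothing beyond
# the pandas import already at the top of this module.
if __name__ == '__main__':
    _demo = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    # positional labels target the index axis by default
    print(_demo.reindex([0, 1, 3]))
    # keyword forms are unambiguous and can be combined
    print(_demo.reindex(index=[0, 1, 3], columns=["A", "C"], fill_value=0))
    print(_demo.drop(index=[0], columns=["B"]))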
| bsd-3-clause |
lfz/Guided-Denoise | Attackset/Iter4_ensv3_resv2_inresv2_random/nets/inception_v4.py | 45 | 15643 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the Inception V4 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception_utils
slim = tf.contrib.slim
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 256, [7, 1], scope='Conv2d_0c_7x1')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 192, [7, 1], scope='Conv2d_0b_7x1')
branch_2 = slim.conv2d(branch_2, 224, [1, 7], scope='Conv2d_0c_1x7')
branch_2 = slim.conv2d(branch_2, 224, [7, 1], scope='Conv2d_0d_7x1')
branch_2 = slim.conv2d(branch_2, 256, [1, 7], scope='Conv2d_0e_1x7')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = tf.concat(axis=3, values=[
slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')])
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
branch_2 = tf.concat(axis=3, values=[
slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')])
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
    net: the output tensor of the network at the requested final_endpoint.
end_points: the set of end_points from the inception model.
Raises:
    ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 299 x 299 x 3
net = slim.conv2d(inputs, 32, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
# 149 x 149 x 32
net = slim.conv2d(net, 32, [3, 3], padding='VALID',
scope='Conv2d_2a_3x3')
if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_0a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID',
scope='Conv2d_0a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_3a', net): return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID',
scope='Conv2d_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_4a', net): return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_5a', net): return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net): return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net): return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net): return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v4(inputs, num_classes=1001, is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
create_aux_logits=True):
"""Creates the Inception V4 model.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
create_aux_logits: Whether to include the auxiliary logits.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV4', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v4_base(inputs, scope=scope)
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# Auxiliary Head logits
if create_aux_logits:
with tf.variable_scope('AuxLogits'):
# 17 x 17 x 1024
aux_logits = end_points['Mixed_6h']
aux_logits = slim.avg_pool2d(aux_logits, [5, 5], stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1],
scope='Conv2d_1b_1x1')
aux_logits = slim.conv2d(aux_logits, 768,
aux_logits.get_shape()[1:3],
padding='VALID', scope='Conv2d_2a')
aux_logits = slim.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes,
activation_fn=None,
scope='Aux_logits')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
scope='AvgPool_1a')
# 1 x 1 x 1536
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_1b')
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
# 1536
logits = slim.fully_connected(net, num_classes, activation_fn=None,
scope='Logits')
end_points['Logits'] = logits
end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
return logits, end_points
inception_v4.default_image_size = 299
inception_v4_arg_scope = inception_utils.inception_arg_scope
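# Illustrative usage sketch (TF1-style graph construction; the batch size and
# num_classes below are example values, not requirements of the definitions above).
if __name__ == '__main__':
    images = tf.placeholder(
        tf.float32,
        [None, inception_v4.default_image_size, inception_v4.default_image_size, 3])
    with slim.arg_scope(inception_v4_arg_scope()):
        logits, end_points = inception_v4(images, num_classes=1001, is_training=False)
    # end_points maps endpoint names such as 'Mixed_5a' or 'PreLogitsFlatten' to
    # the corresponding tensors; 'Predictions' holds the softmax output.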
| apache-2.0 |
mupif/mupif | mupif/EnsightReader2.py | 1 | 13109 | #
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from builtins import range
import re
from . import Mesh
from . import Vertex
from . import Cell
from . import BBox
from . import Field
from . import FieldID
from . import ValueType
debug = 0
def readEnsightGeo(name, partFilter, partRec):
"""
Reads Ensight geometry file (Ensight6 format) and returns corresponding Mesh object instance. Supports only unstructured meshes.
:param str name: Path to Ensight geometry file (\*.geo)
    :param tuple partFilter: Only parts with id contained in partFilter will be imported
:param list partRec: A list containing info about individual parts (number of elements). Needed by readEnsightField
:return: mesh
:rtype: Mesh.Mesh
"""
vertexMapping = {}
vertices = []
cells = []
vnum = 0
enum = 0
# open the geo file
f = open(name, 'r')
if debug:
print("Importing geometry from %s" % name)
mesh = Mesh.UnstructuredMesh()
# process header (6 lines)
desc1 = f.readline()
desc2 = f.readline()
nodeidrec = f.readline()
# check if nodal ids given -> required
if not re.match('node\s+id\s+given', nodeidrec):
print("Given node ids required")
return
elemidrec = f.readline()
# check if element ids given -> required
if not re.match('element\s+id\s+given', elemidrec):
print("Given element ids required")
return
coordkwdrec = f.readline()
numberOfUnstructuredNodes = int(f.readline())
# check
# read unstructured coordinates
for i in range(numberOfUnstructuredNodes):
line = f.readline()
match = re.match('(.{8})(.{12})(.{12})(.{12})', line)
# print line
if match:
id = int(match.group(1))
x = float(match.group(2))
y = float(match.group(3))
z = float(match.group(4))
# print id, x, y, z
vertexMapping[id] = vnum # remember mapping id -> vertex number
vertices.append(Vertex.Vertex(vnum, id, (x, y, z)))
# increment vertex counter
vnum = vnum+1
# read parts in sequential order
line = f.readline()
while line:
match = re.search('\s*part\s+(\d+)', line)
if match:
partnum = int(match.group(1))
            partRec.append({}) # add empty dict for each part containing number of elements for each element type
if partnum in partFilter:
if debug:
print("Importing part %d" % partnum)
partdesc = f.readline().rstrip('\r\n')
# process part
# get element type
line = f.readline()
(line, enum) = readEnsightGeo_Part(f, line, mesh, enum, cells, vertexMapping, partnum, partdesc, partRec)
else:
line = f.readline()
else:
line = f.readline()
if debug:
print("Setting up mesh: %d vertices, %d cells" % (vnum, enum))
print(len(vertices), len(cells))
mesh.setup(vertices, cells)
return mesh
def readEnsightGeo_Part(f, line, mesh, enum, cells, vertexMapping, partnum, partdesc, partRec):
"""
Reads single cell part geometry from an Ensight file.
:param File f: File object
:param str line: Current line to process (should contain element type)
:param Mesh.Mesh mesh: Mupif mesh object to accommodate new cells
:param int enum: Accumulated cell number
:param list cells: List of individual Cells
:param dict vertexMapping: Map from vertex label (as given in Ensight file) to local number
:param int partnum: Part number
    :param str partdesc: Part description record
    :param list partRec: Output argument (list) containing info about individual parts (number of elements). Needed by readEnsightField
:return: tuple (line, cell number)
:rtype: tuple (line, enum)
"""
# if the next line is not next part record, then should be element section
while not re.search('\s*part\s+(\d+)', line):
if line == '':
break
# ok no "part" keyword, parse element section
eltype = line.rstrip('\r\n')
if debug:
print("(", eltype, ")")
line = f.readline()
nelem = int(line.rstrip('\r\n'))
# remember info to partRec
partRec[partnum-1][eltype] = nelem
if debug:
print("part %s nelem %d" % (partdesc, nelem))
# read individual elements
for i in range(nelem):
elemRec = f.readline()
if eltype == "hexa8":
match = re.match('(.{8})(.{8})(.{8})(.{8})(.{8})(.{8})(.{8})(.{8})(.{8})', elemRec)
if match:
elnum = int(match.group(1))
elnodes = (int(match.group(2)), int(match.group(3)), int(match.group(4)), int(match.group(5)),
int(match.group(6)), int(match.group(7)), int(match.group(8)), int(match.group(9)))
# print ("Brick: %d (%d %d %d %d %d %d %d %d)"%(elnum, elnodes[0],elnodes[1],elnodes[2],elnodes[3],elnodes[4],elnodes[5],elnodes[6],elnodes[7]))
_vert = [vertexMapping[i] for i in elnodes]
cells.append(Cell.Brick_3d_lin(mesh, enum, enum, tuple(_vert)))
enum = enum+1
elif eltype == "quad4":
match = re.match('(.{8})(.{8})(.{8})(.{8})(.{8})', elemRec)
if match:
elnum = int(match.group(1))
elnodes = (int(match.group(2)), int(match.group(3)), int(match.group(4)), int(match.group(5)))
if debug:
print("Quad: %d (%d %d %d %d)" % (elnum, elnodes[0], elnodes[1], elnodes[2], elnodes[3]))
_vert = [vertexMapping[i] for i in elnodes]
cells.append(Cell.Quad_2d_lin(mesh, enum, enum, tuple(_vert)))
enum = enum+1
else:
pass
print("Element type %s not suported" % eltype)
# finished parsing part for specific element type
line = f.readline()
# next part record found
return line, enum
def readEnsightField(name, parts, partRec, type, fieldID, mesh, units, time):
"""
Reads either Per-node or Per-element variable file and returns corresponding Field representation.
:param str name: Input field name with variable data
    :param tuple parts: Only parts with id contained in parts will be imported
:param list partRec: A list containing info about individual parts (number of elements per each element type).
:param int type: Determines type of field values: type = 1 scalar, type = 3 vector, type = 6 tensor
:param FieldID fieldID: Field type (displacement, strain, temperature ...)
:param Mesh.Mesh mesh: Corresponding mesh
:param PhysicalUnit units: field units
:param PhysicalQuantity time: time
    :return: Field of unknowns
:rtype: Field
"""
vertexVals = []
cellVals = []
indx = list(range(6))
values = []
if type == 1:
ftype = ValueType.Scalar
elif type == 3:
ftype = ValueType.Vector
else:
ftype = ValueType.Tensor
# open the geo file
f = open(name, 'r')
# get variable name (1st line)
varname = f.readline().rstrip('\r\n')
if debug:
print("Importing %s from %s" % (varname, name))
# now check if nodal records available or part (cell records)
line = f.readline()
match = re.match('part\s+(\d+)', line)
if not match:
# nodal (vertex based specification)
size = mesh.getNumberOfVertices() * type
print("Expecting ", mesh.getNumberOfVertices(), " nodal records in ", size//6, " lines")
# read nodeal variables
for i in range(size//6): # six values per row in fixed format 12.5e
for j in indx:
try:
vertexVals.append(float(line[j*12:(j+1)*12]))
except:
print("exception....", j, line, ">", line[j*12:(j+1)*12])
line = f.readline()
# parse remaining values
# line = f.readline()
for j in range(size % 6):
vertexVals.append(float(line[j*12:(j+1)*12]))
if size % 6 > 0:
line = f.readline()
# done parsing nodal record(s)
# so this should be per-vertex variable file -> vertex based field
# convert vertexVals into form required by field
for i in range(mesh.getNumberOfVertices()):
if type == 1: # scalar
values.append((vertexVals[i],))
elif type == 3: # vector
values.append((vertexVals[i*3], vertexVals[i*3+1], vertexVals[i*3+2]))
elif type == 6: # tensor
values.append((vertexVals[i*6], vertexVals[i*6+1],
vertexVals[i*6+2], vertexVals[i*6+3],
                               vertexVals[i*6+4], vertexVals[i*6+5]))
field = Field.Field(mesh, fieldID, ftype, units, time, values, Field.FieldType.FT_vertexBased)
return field
else:
# ok nodal section missing, parts should provide per-cell variables
while line:
match = re.search('\s*part\s+(\d+)', line)
if match:
partnum = int(match.group(1))
if partnum in parts:
if debug:
print("Importing part %d" % partnum)
# get element type
line = f.readline()
# if the next line is not next part record, then should be element section
while not re.search('\s*part\s+(\d+)', line):
# ok no "part" keyword, parse element section
eltype = line.rstrip('\r\n')
if debug:
print("eltype:", eltype)
nelem = partRec[partnum-1][eltype] # get number of elements in part
if debug:
print("(", eltype, nelem, ")")
size = nelem * type
cellVals = [] # empty values for each element group
for i in range(size//6): # six values per row in fixed format 12.5e
line = f.readline()
# print ".",
for j in indx:
cellVals.append(float(line[j*12:(j+1)*12]))
# parse remaining values
line = f.readline()
for j in range(size % 6):
cellVals.append(float(line[j*12:(j+1)*12]))
if size % 6 > 0:
line = f.readline()
# print "%"
# now convert that into format required by filed
for i in range(nelem):
if type == 1: # scalar
values.append((cellVals[i],))
elif type == 3: # vector
values.append((cellVals[i*3], cellVals[i*3+1], cellVals[i*3+2]))
elif type == 6: # tensor
                                values.append((cellVals[i*6], cellVals[i*6+1],
                                               cellVals[i*6+2], cellVals[i*6+3],
                                               cellVals[i*6+4], cellVals[i*6+5]))
if debug:
print("done importing element section")
# done parsing cell record(s) in part
else: # if (partnum in parts): proceed to next part
line = f.readline()
else:
line = f.readline()
# so this should be per-cell variable file -> cell based field
field = Field.Field(mesh, fieldID, ftype, units, time, values, Field.FieldType.FT_cellBased)
return field
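# Usage sketch (illustrative only, not part of the original reader): once a
# Mesh has been built from the matching Ensight geometry file, a per-node
# scalar variable file could be read roughly like this. The file name, part
# ids, partRec contents and fieldID value below are hypothetical.
#
# field = readEnsightField('temperature.escl', parts=(1,), partRec=partRec,
#                          type=1, fieldID=fieldID, mesh=mesh,
#                          units=None, time=None)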
| lgpl-3.0 |
ClearCorp-dev/odoo | addons/account/report/account_invoice_report.py | 60 | 12934 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(cr, uid, [('rate', '=', 1)], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Product Quantity', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'commercial_partner_id': fields.many2one('res.partner', 'Partner Company', help="Commercial Entity"),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr': fields.integer('# of Invoices', readonly=True), # TDE FIXME master: rename into nbr_lines
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'country_id': fields.many2one('res.country', 'Country of the Partner Company'),
}
_order = 'date desc'
_depends = {
'account.invoice': [
'account_id', 'amount_total', 'commercial_partner_id', 'company_id',
'currency_id', 'date_due', 'date_invoice', 'fiscal_position',
'journal_id', 'partner_bank_id', 'partner_id', 'payment_term',
'period_id', 'residual', 'state', 'type', 'user_id',
],
'account.invoice.line': [
'account_id', 'invoice_id', 'price_subtotal', 'product_id',
'quantity', 'uos_id',
],
'product.product': ['product_tmpl_id'],
'product.template': ['categ_id'],
'product.uom': ['category_id', 'factor', 'name', 'uom_type'],
'res.currency.rate': ['currency_id', 'name'],
'res.partner': ['country_id'],
}
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.product_id, sub.partner_id, sub.country_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual, sub.commercial_partner_id as commercial_partner_id
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id,
CASE
WHEN u.uom_type::text <> 'reference'::text
THEN ( SELECT product_uom.name
FROM product_uom
WHERE product_uom.uom_type::text = 'reference'::text
AND product_uom.active
AND product_uom.category_id = u.category_id LIMIT 1)
ELSE u.name
END AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor
ELSE ail.quantity / u.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor)
ELSE SUM(ail.quantity / u.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / CASE
WHEN (( SELECT count(l.id) AS count
FROM account_invoice_line l
LEFT JOIN account_invoice a ON a.id = l.invoice_id
WHERE a.id = ai.id)) <> 0
THEN ( SELECT count(l.id) AS count
FROM account_invoice_line l
LEFT JOIN account_invoice a ON a.id = l.invoice_id
WHERE a.id = ai.id)
ELSE 1::bigint
END::numeric AS residual,
ai.commercial_partner_id as commercial_partner_id,
partner.country_id
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
JOIN res_partner partner ON ai.commercial_partner_id = partner.id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ai.date_invoice, ai.id,
ai.partner_id, ai.payment_term, ai.period_id, u.name, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, u.uom_type, u.category_id, ai.commercial_partner_id, partner.country_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM (
%s %s %s
) AS sub
JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id)
WHERE
cr.id IN (SELECT id
FROM res_currency_rate cr2
WHERE (cr2.currency_id = sub.currency_id)
AND ((sub.date IS NOT NULL AND cr2.name <= sub.date)
OR (sub.date IS NULL AND cr2.name <= NOW()))
ORDER BY name DESC LIMIT 1)
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
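# Usage sketch (illustrative only, not part of the original module): once
# init() has (re)created the SQL view, the report behaves like a read-only
# model and can be queried through the old ORM API, e.g.
#
#   report_obj = self.pool.get('account.invoice.report')
#   ids = report_obj.search(cr, uid, [('state', '=', 'open')], context=context)
#   lines = report_obj.browse(cr, uid, ids, context=context)
#
# The domain shown here is hypothetical.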
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
stadelmanma/OpenPNM | test/unit/Network/GenericNetworkTest.py | 1 | 14373 | import OpenPNM
import scipy as sp
class GenericNetworkTest:
def setup_class(self):
self.net = OpenPNM.Network.Cubic(shape=[10, 10, 10])
def teardown_class(self):
mgr = OpenPNM.Base.Workspace()
mgr.clear()
def test_find_connected_pores_numeric_not_flattend(self):
a = self.net.find_connected_pores(throats=[0, 1])
assert sp.all(a.flatten() == [0, 1, 1, 2])
def test_find_connected_pores_numeric_flattend(self):
a = self.net.find_connected_pores(throats=[0, 1], flatten=True)
assert sp.all(a == [0, 1, 2])
def test_find_connected_pores_boolean_flattend(self):
Tind = sp.zeros((self.net.Nt,), dtype=bool)
Tind[[0, 1]] = True
a = self.net.find_connected_pores(throats=Tind, flatten=True)
assert sp.all(a == [0, 1, 2])
def test_find_connected_pores_empty_flattend(self):
a = self.net.find_connected_pores(throats=[], flatten=True)
assert sp.shape(a) == (0, )
def test_find_neighbor_pores_numeric(self):
a = self.net.find_neighbor_pores(pores=[])
assert sp.size(a) == 0
def test_find_neighbor_pores_boolean(self):
Pind = sp.zeros((self.net.Np,), dtype=bool)
Pind[[0, 1]] = True
a = self.net.find_neighbor_pores(pores=Pind)
assert sp.all(a == [2, 10, 11, 100, 101])
def test_find_neighbor_pores_numeric_union(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='union')
assert sp.all(a == [1, 3, 10, 12, 100, 102])
def test_find_neighbor_pores_numeric_intersection(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='intersection')
assert sp.all(a == [1])
def test_find_neighbor_pores_numeric_notintersection(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='not_intersection')
assert sp.all(a == [3, 10, 12, 100, 102])
def test_find_neighbor_pores_numeric_union_incl_self(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='union',
excl_self=False)
assert sp.all(a == [0, 1, 2, 3, 10, 12, 100, 102])
def test_find_neighbor_pores_numeric_intersection_incl_self(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='intersection',
excl_self=False)
assert sp.all(a == [1])
def test_find_neighbor_pores_numeric_notintersection_incl_self(self):
a = self.net.find_neighbor_pores(pores=[0, 2],
mode='not_intersection',
excl_self=False)
assert sp.all(a == [0, 2, 3, 10, 12, 100, 102])
def test_find_neighbor_throats_empty(self):
a = self.net.find_neighbor_throats(pores=[])
assert sp.size(a) == 0
def test_find_neighbor_throats_boolean(self):
Pind = sp.zeros((self.net.Np,), dtype=bool)
Pind[[0, 1]] = True
a = self.net.find_neighbor_throats(pores=Pind)
assert sp.all(a == [0, 1, 900, 901, 1800, 1801])
def test_find_neighbor_throats_numeric_union(self):
a = self.net.find_neighbor_throats(pores=[0, 2], mode='union')
assert sp.all(a == [0, 1, 2, 900, 902, 1800, 1802])
def test_find_neighbor_throats_numeric_intersection(self):
a = self.net.find_neighbor_throats(pores=[0, 2], mode='intersection')
assert sp.size(a) == 0
def test_find_neighbor_throats_numeric_not_intersection(self):
a = self.net.find_neighbor_throats(pores=[0, 2],
mode='not_intersection')
assert sp.all(a == [0, 1, 2, 900, 902, 1800, 1802])
def test_num_neighbors_empty(self):
a = self.net.num_neighbors(pores=[], element='pores')
assert sp.size(a) == 0
a = self.net.num_neighbors(pores=[], element='throats')
assert sp.size(a) == 0
def test_num_neighbors_pores_flattened(self):
a = self.net.num_neighbors(pores=0, element='pores', flatten=True)
assert a == 3
assert isinstance(a, int)
a = self.net.num_neighbors(pores=[0, 2], element='pores', flatten=True)
assert a == 6
assert isinstance(a, int)
def test_num_neighbors_pores_with_modes(self):
a = self.net.num_neighbors(pores=[0, 2], element='pores', mode='union',
flatten=True)
assert a == 6
a = self.net.num_neighbors(pores=[0, 2], element='pores',
mode='intersection', flatten=True)
assert a == 1
a = self.net.num_neighbors(pores=[0, 2], element='pores',
mode='not_intersection', flatten=True)
assert a == 5
def test_num_neighbors_pores_notflattened(self):
a = self.net.num_neighbors(pores=[0, 2], flatten=False)
assert sp.all(a == [3, 4])
a = self.net.num_neighbors(pores=0, flatten=False)
assert sp.all(a == [3])
assert isinstance(a, sp.ndarray)
def test_num_neighbors_throats_flattened(self):
a = self.net.num_neighbors(pores=0, element='throats', flatten=True)
assert a == 3
a = self.net.num_neighbors(pores=[0, 1], element='throats',
flatten=True)
assert a == 6
self.net.extend(throat_conns=[[0, 1], [0, 2]])
a = self.net.num_neighbors(pores=0, element='throats', flatten=True)
assert a == 5
a = self.net.num_neighbors(pores=[0, 1], element='throats',
flatten=True)
assert a == 8
self.net.trim(throats=self.net.Ts[-2:])
def test_num_neighbors_throats_with_modes(self):
a = self.net.num_neighbors(pores=[0, 1], element='throats',
mode='union', flatten=True)
assert a == 6
self.net.extend(throat_conns=[[0, 1], [0, 2]])
a = self.net.num_neighbors(pores=[0, 1], element='throats',
mode='union', flatten=True)
assert a == 8
a = self.net.num_neighbors(pores=[0, 1], element='throats',
mode='intersection', flatten=True)
assert a == 2
a = self.net.num_neighbors(pores=[0, 1], element='throats',
mode='not_intersection', flatten=True)
assert a == 6
self.net.trim(throats=self.net.Ts[-2:])
def test_num_neighbors_throats_not_flattened(self):
a = self.net.num_neighbors(pores=0, element='throats', flatten=False)
assert sp.all(a == [3])
a = self.net.num_neighbors(pores=[0, 1, 2, 3], element='throats',
flatten=False)
assert sp.all(a == [3, 4, 4, 4])
self.net.extend(throat_conns=[[0, 1], [0, 2]])
a = self.net.num_neighbors(pores=0, element='throats', flatten=False)
assert sp.all(a == [5])
a = self.net.num_neighbors(pores=[0, 1, 2, 3], element='throats',
flatten=False)
assert sp.all(a == [5, 5, 5, 4])
self.net.trim(throats=self.net.Ts[-2:])
def test_find_interface_throats(self):
self.net['pore.domain1'] = False
self.net['pore.domain2'] = False
self.net['pore.domain3'] = False
self.net['pore.domain1'][[0, 1, 2]] = True
self.net['pore.domain2'][[100, 101, 102]] = True
self.net['pore.domain3'][900:] = True
a = self.net.find_interface_throats(labels=['domain1', 'domain2'])
assert sp.all(a == [1800, 1801, 1802])
a = self.net.find_interface_throats(labels=['domain1', 'domain3'])
assert sp.size(a) == 0
def test_check_network_health_healthy(self):
a = self.net.check_network_health()
items = set(['disconnected_clusters',
'isolated_pores',
'trim_pores',
'duplicate_throats',
'bidirectional_throats',
'headless_throats',
'looped_throats'])
assert items == a.keys()
assert sp.size(list(a.values())) == 0
def test_check_network_isolated_pores(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
Ts = net.find_neighbor_throats(pores=0)
net.trim(throats=Ts)
a = net.check_network_health()
assert a['isolated_pores'] == 0
net.trim(a['trim_pores'])
a = net.check_network_health()
assert sp.size(list(a.values())) == 0
def test_check_network_health_duplicate_throat(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
P12 = net['throat.conns'][0]
net.extend(throat_conns=[P12])
a = net.check_network_health()
assert len(a['duplicate_throats']) == 1
assert len(a['duplicate_throats'][0]) == 2
def test_check_network_health_triplicate_throats(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
P12 = net['throat.conns'][0]
net.extend(throat_conns=[P12])
net.extend(throat_conns=[P12])
a = net.check_network_health()
assert len(a['duplicate_throats']) == 1
assert len(a['duplicate_throats'][0]) == 3
def test_check_network_health_multiple_duplicate_throats(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
P12 = net['throat.conns'][0]
net.extend(throat_conns=[P12])
P12 = net['throat.conns'][1]
net.extend(throat_conns=[P12])
a = net.check_network_health()
assert len(a['duplicate_throats']) == 2
assert len(a['duplicate_throats'][1]) == 2
def test_check_network_health_bidirectional_throats(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
P12 = net['throat.conns'][0]
net['throat.conns'][0] = [P12[1], P12[0]]
a = net.check_network_health()
assert sp.size(a['bidirectional_throats']) == 1
assert sp.size(a['duplicate_throats']) == 0
def test_check_network_health_headless_throats(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
net.extend(throat_conns=[[5, 5555]])
a = net.check_network_health()
assert a['headless_throats'] == sp.array([300])
def test_check_network_health_looped_throats(self):
net = OpenPNM.Network.Cubic(shape=[5, 5, 5])
net.extend(throat_conns=[[5, 5]])
a = net.check_network_health()
assert a['looped_throats'] == sp.array([300])
def test_find_nearby_pores_distance_1(self):
a = self.net.find_nearby_pores(pores=[0, 1], distance=1)
b = self.net.find_neighbor_pores(pores=[0, 1], flatten=False)
assert sp.all([sp.all(a[i] == b[i]) for i in range(0, len(a))])
def test_find_nearby_pores_distance_2(self):
a = self.net.find_nearby_pores(pores=[0, 1], distance=2)
assert sp.all([sp.size(a[i]) for i in [0, 1]] == [10, 14])
def test_find_nearby_pores_distance_0(self):
a = self.net.find_nearby_pores(pores=[0, 1], distance=0)
assert sp.shape(a) == (2, 0)
def test_find_nearby_pores_distance_1_flattened(self):
a = self.net.find_nearby_pores(pores=[0, 1], distance=1, flatten=True)
b = self.net.find_neighbor_pores(pores=[0, 1])
assert sp.all(a == b)
def test_find_nearby_pores_distance_2_flattened(self):
a = self.net.find_nearby_pores(pores=[0, 1], distance=2, flatten=True)
assert sp.size(a) == 15
def test_find_nearby_pores_distance_2_flattened_inclself(self):
a = self.net.find_nearby_pores(pores=[0, 1],
distance=2,
flatten=True,
excl_self=False)
assert sp.size(a) == 17
assert sp.all(sp.in1d([0, 1], a))
def test_add_boundary_pores_cubic(self):
net = OpenPNM.Network.Cubic(shape=[3, 3, 3], spacing=1)
net.add_boundary_pores(pores=net.pores('top'), offset=[0, 0, 1])
assert net.Np == 36
assert net.Nt == 63
def test_add_boundary_pores_cubic_2D(self):
net = OpenPNM.Network.Cubic(shape=[3, 3, 1], spacing=1)
Ps = net.Ps
net.add_boundary_pores(pores=Ps, offset=[0, 0, 1])
assert net.Np == 18
assert net.Nt == 21
net.add_boundary_pores(pores=Ps, offset=[0, 0, -1])
assert net.Np == 27
assert net.Nt == 30
def test_add_boundary_pores_cubic_custom_label(self):
net = OpenPNM.Network.Cubic(shape=[3, 3, 3], spacing=1)
Ps = net.pores('top')
net.add_boundary_pores(pores=Ps,
offset=[0, 0, 1],
apply_label='pore.test')
assert 'pore.test' in net.labels()
Ps = net.pores('bottom')
net.add_boundary_pores(pores=Ps,
offset=[0, 0, -1],
apply_label='test2')
assert 'pore.test2' in net.labels()
def test_add_boundary_pores_cubicdual(self):
net = OpenPNM.Network.CubicDual(shape=[5, 5, 5],
label_1='primary',
label_2='secondary')
Ps = net.pores(labels=['surface', 'bottom'], mode='intersection')
net.add_boundary_pores(pores=Ps, offset=[0, 0, -0.5])
Ps2 = net.pores(labels=['boundary'], mode='intersection')
assert Ps.size == Ps2.size
assert ~sp.any(sp.in1d(Ps, Ps2))
def test_add_boundary_pores_delaunay(self):
net = OpenPNM.Network.Delaunay(num_pores=30, domain_size=[1, 1, 1])
throats = net.Nt
pores = sp.random.randint(30, size=5)
net.add_boundary_pores(pores=pores, offset=[0, 0, 1])
assert net.Np == 35
assert net.Nt == throats + 5
def test_add_boundary_pores_delaunaycubic(self):
net = OpenPNM.Network.DelaunayCubic(shape=[3, 3, 3], spacing=1)
throats = net.Nt
pores = sp.random.randint(27, size=5)
net.add_boundary_pores(pores=pores, offset=[0, 0, 1])
assert net.Np == 32
assert net.Nt == throats + 5
| mit |
alkalait/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter3_MCMC/github_pull.py | 95 | 2327 | #github data scrapper
"""
variables of interest:
indp. variables
    - language, given as a set of binary indicator variables; 6 positions for the 6 languages in language_mappings
- #number of days created ago, 1 position
- has wiki? Boolean, 1 position
- followers, 1 position
- following, 1 position
- constant
dep. variables
-stars/watchers
-forks
"""
from json import loads
import datetime
import numpy as np
from requests import get
MAX = 8000000
today = datetime.datetime.today()
randint = np.random.randint
N = 120 #sample size.
auth = ("username", "password" )
language_mappings = {"Python": 0, "JavaScript": 1, "Ruby": 2, "Java":3, "Shell":4, "PHP":5}
#define data matrix:
X = np.zeros( (N , 12), dtype = int )
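# Column layout of X, one row per sampled repository (inferred from the
# assignments below):
#   0-5  : language indicator variables (see language_mappings)
#   6    : age of the repository in days
#   7    : has_wiki flag
#   8    : number of accounts the owner follows
#   9    : number of followers of the owner
#   10   : watchers/stars count (dependent variable)
#   11   : forks count (dependent variable)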
for i in xrange(N):
is_fork = True
is_valid_language = False
while is_fork == True or is_valid_language == False:
is_fork = True
is_valid_language = False
params = {"since":randint(0, MAX ) }
r = get("https://api.github.com/repositories", params = params, auth=auth )
results = loads( r.text )[0]
#im only interested in the first one, and if it is not a fork.
is_fork = results["fork"]
r = get( results["url"], auth = auth)
#check the language
repo_results = loads( r.text )
try:
language_mappings[ repo_results["language" ] ]
is_valid_language = True
        except KeyError:
pass
#languages
X[ i, language_mappings[ repo_results["language" ] ] ] = 1
#delta time
X[ i, 6] = ( today - datetime.datetime.strptime( repo_results["created_at"][:10], "%Y-%m-%d" ) ).days
#haswiki
X[i, 7] = repo_results["has_wiki"]
#get user information
r = get( results["owner"]["url"] , auth = auth)
user_results = loads( r.text )
X[i, 8] = user_results["following"]
X[i, 9] = user_results["followers"]
#get dep. data
X[i, 10] = repo_results["watchers_count"]
X[i, 11] = repo_results["forks_count"]
print
print " -------------- "
print i, ": ", results["full_name"], repo_results["language" ], repo_results["watchers_count"], repo_results["forks_count"]
print " -------------- "
print
np.savetxt("data/github_data.csv", X, delimiter=",", fmt="%d" )
| mit |
godfather1103/WeiboRobot | python27/1.0/lib/ctypes/test/test_unicode.py | 35 | 5126 | # coding: latin-1
import unittest
import ctypes
from ctypes.test import need_symbol
import _ctypes_test
@need_symbol('c_wchar')
class UnicodeTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
dll = ctypes.CDLL(_ctypes_test.__file__)
cls.wcslen = dll.my_wcslen
cls.wcslen.argtypes = [ctypes.c_wchar_p]
def setUp(self):
self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
def tearDown(self):
ctypes.set_conversion_mode(*self.prev_conv_mode)
def test_ascii_strict(self):
wcslen = self.wcslen
ctypes.set_conversion_mode("ascii", "strict")
# no conversions take place with unicode arguments
self.assertEqual(wcslen(u"abc"), 3)
self.assertEqual(wcslen(u"ab\u2070"), 3)
# string args are converted
self.assertEqual(wcslen("abc"), 3)
self.assertRaises(ctypes.ArgumentError, wcslen, "abä")
def test_ascii_replace(self):
wcslen = self.wcslen
ctypes.set_conversion_mode("ascii", "replace")
self.assertEqual(wcslen(u"abc"), 3)
self.assertEqual(wcslen(u"ab\u2070"), 3)
self.assertEqual(wcslen("abc"), 3)
self.assertEqual(wcslen("abä"), 3)
def test_ascii_ignore(self):
wcslen = self.wcslen
ctypes.set_conversion_mode("ascii", "ignore")
self.assertEqual(wcslen(u"abc"), 3)
self.assertEqual(wcslen(u"ab\u2070"), 3)
# ignore error mode skips non-ascii characters
self.assertEqual(wcslen("abc"), 3)
self.assertEqual(wcslen("äöüß"), 0)
def test_latin1_strict(self):
wcslen = self.wcslen
ctypes.set_conversion_mode("latin-1", "strict")
self.assertEqual(wcslen(u"abc"), 3)
self.assertEqual(wcslen(u"ab\u2070"), 3)
self.assertEqual(wcslen("abc"), 3)
self.assertEqual(wcslen("äöüß"), 4)
def test_buffers(self):
ctypes.set_conversion_mode("ascii", "strict")
buf = ctypes.create_unicode_buffer("abc")
self.assertEqual(len(buf), 3+1)
ctypes.set_conversion_mode("ascii", "replace")
buf = ctypes.create_unicode_buffer("abäöü")
self.assertEqual(buf[:], u"ab\uFFFD\uFFFD\uFFFD\0")
self.assertEqual(buf[::], u"ab\uFFFD\uFFFD\uFFFD\0")
self.assertEqual(buf[::-1], u"\0\uFFFD\uFFFD\uFFFDba")
self.assertEqual(buf[::2], u"a\uFFFD\uFFFD")
self.assertEqual(buf[6:5:-1], u"")
ctypes.set_conversion_mode("ascii", "ignore")
buf = ctypes.create_unicode_buffer("abäöü")
# is that correct? not sure. But with 'ignore', you get what you pay for..
self.assertEqual(buf[:], u"ab\0\0\0\0")
self.assertEqual(buf[::], u"ab\0\0\0\0")
self.assertEqual(buf[::-1], u"\0\0\0\0ba")
self.assertEqual(buf[::2], u"a\0\0")
self.assertEqual(buf[6:5:-1], u"")
@need_symbol('c_wchar')
class StringTestCase(UnicodeTestCase):
@classmethod
def setUpClass(cls):
super(StringTestCase, cls).setUpClass()
cls.func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
def setUp(self):
func = self.func
self.prev_conv_mode = ctypes.set_conversion_mode("ascii", "strict")
func.argtypes = [ctypes.c_char_p]
func.restype = ctypes.c_char_p
def tearDown(self):
func = self.func
ctypes.set_conversion_mode(*self.prev_conv_mode)
func.argtypes = None
func.restype = ctypes.c_int
    def test_ascii_strict(self):
func = self.func
ctypes.set_conversion_mode("ascii", "strict")
self.assertEqual(func("abc"), "abc")
self.assertEqual(func(u"abc"), "abc")
self.assertRaises(ctypes.ArgumentError, func, u"abä")
def test_ascii_ignore(self):
func = self.func
ctypes.set_conversion_mode("ascii", "ignore")
self.assertEqual(func("abc"), "abc")
self.assertEqual(func(u"abc"), "abc")
self.assertEqual(func(u"äöüß"), "")
def test_ascii_replace(self):
func = self.func
ctypes.set_conversion_mode("ascii", "replace")
self.assertEqual(func("abc"), "abc")
self.assertEqual(func(u"abc"), "abc")
self.assertEqual(func(u"äöüß"), "????")
def test_buffers(self):
ctypes.set_conversion_mode("ascii", "strict")
buf = ctypes.create_string_buffer(u"abc")
self.assertEqual(len(buf), 3+1)
ctypes.set_conversion_mode("ascii", "replace")
buf = ctypes.create_string_buffer(u"abäöü")
self.assertEqual(buf[:], "ab???\0")
self.assertEqual(buf[::], "ab???\0")
self.assertEqual(buf[::-1], "\0???ba")
self.assertEqual(buf[::2], "a??")
self.assertEqual(buf[6:5:-1], "")
ctypes.set_conversion_mode("ascii", "ignore")
buf = ctypes.create_string_buffer(u"abäöü")
# is that correct? not sure. But with 'ignore', you get what you pay for..
self.assertEqual(buf[:], "ab\0\0\0\0")
self.assertEqual(buf[::], "ab\0\0\0\0")
self.assertEqual(buf[::-1], "\0\0\0\0ba")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
chaosim/dao | samples/hello.py | 1 | 1301 | from dao import word
from samplevars import x
def parse(grammar_element, text):
x = Var()
code = grammar_element(x)+x
return eval([code, text])
def match(grammar_element, text):
x = Var()
code = grammar_element(x)
return eval([code, text])
print parse(word, 'hello')
print match(word, 'hello')
def hello(x):
return word('hello')+some(space)+word(x)
#[sequence, [word, 'hello'] [some, [space]] [word, x]]
print parse(hello, 'hello world')
print match(hello, 'hello world')
#
def f():
global a
a1 = 1
if a1:
a2 = 2
else:
pass
a3 = phi(a1, a2)
use(a3)
def f():
global a
a1 = 1
if a1:
a2 = 2
else:
a3 = 3
a4 = phi(a2, a3)
print a4
use(a4)
a = 3
def f():
global a
a = phi(a, a)
a = a - 1
f()
use(a)
a1 = 3
def f():
global a
a3 = phi(a1, a2)
a2 = a3-1
f()
use(a2)
i1 = 0
j1 = 0
def f():
i3 = phi(i1, i2) #i3 = phi(0, j3)
j3 = phi(j1, j2) #j3 = phi(0, j3+1)
i2 = j3
j2 = i2+1 #j2 = j3+1
g()
use(i2, j2)
i1 = 0
j1 = 0
def f():
i3 = phi(i1, i2) #i3 = phi(0, 0)
j3 = phi(j1, j2) #j3 = phi(0, j3+1)
i2 = 0
j2 = j2+1 #j2 = j3+1
g()
i5 = phi(i3, i4(g()))
i6 = i5+1
use(i6, j2) | gpl-3.0 |
nbp/git-repo | editor.py | 85 | 2660 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import sys
import subprocess
import tempfile
from error import EditorError
class Editor(object):
"""Manages the user's preferred text editor."""
_editor = None
globalConfig = None
@classmethod
def _GetEditor(cls):
if cls._editor is None:
cls._editor = cls._SelectEditor()
return cls._editor
@classmethod
def _SelectEditor(cls):
e = os.getenv('GIT_EDITOR')
if e:
return e
if cls.globalConfig:
e = cls.globalConfig.GetString('core.editor')
if e:
return e
e = os.getenv('VISUAL')
if e:
return e
e = os.getenv('EDITOR')
if e:
return e
if os.getenv('TERM') == 'dumb':
print(
"""No editor specified in GIT_EDITOR, core.editor, VISUAL or EDITOR.
Tried to fall back to vi but terminal is dumb. Please configure at
least one of these before using this command.""", file=sys.stderr)
sys.exit(1)
return 'vi'
@classmethod
def EditString(cls, data):
"""Opens an editor to edit the given content.
Args:
data : the text to edit
Returns:
new value of edited text; None if editing did not succeed
"""
editor = cls._GetEditor()
if editor == ':':
return data
fd, path = tempfile.mkstemp()
try:
os.write(fd, data)
os.close(fd)
fd = None
if re.compile("^.*[$ \t'].*$").match(editor):
args = [editor + ' "$@"', 'sh']
shell = True
else:
args = [editor]
shell = False
args.append(path)
try:
rc = subprocess.Popen(args, shell=shell).wait()
except OSError as e:
raise EditorError('editor failed, %s: %s %s'
% (str(e), editor, path))
if rc != 0:
raise EditorError('editor failed with exit status %d: %s %s'
% (rc, editor, path))
fd2 = open(path)
try:
return fd2.read()
finally:
fd2.close()
finally:
if fd:
os.close(fd)
os.remove(path)
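# Usage sketch (illustrative only, not part of the original file): callers
# normally point the class at the global git config once, then hand it text
# to edit. 'global_config' below is a hypothetical GitConfig instance.
#
#   Editor.globalConfig = global_config
#   new_text = Editor.EditString('initial text\n')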
| apache-2.0 |
jsheperd/rotate_backup | rotate_backup.py | 1 | 2651 | #!/usr/bin/env python
import sys
import os
import glob
import time
class archive:
    # The archive class represents an archive medium with its age-related parameters
def __init__(self, path):
self.path = path
self.time = time.gmtime(os.path.getmtime(path))
self.year = time.strftime("%Y", self.time)
self.month = time.strftime("%Y%m", self.time)
self.week = time.strftime("%Y%W", self.time)
self.day = time.strftime("%Y%m%d", self.time)
self.hour = time.strftime("%Y%m%d%H", self.time)
self.min = time.strftime("%Y%m%d%H%M", self.time)
self.sec = time.strftime("%Y%m%d%H%M%S", self.time)
def rm(self):
# remove the archive from the filesystem
print "rm %s" % self.path
os.remove(self.path)
class binStoreNewest:
    # Stores at most binNum bins, keyed by group id; each bin keeps only the
    # newest archive of its group, and only the binNum newest groups are kept.
def __init__(self, binNum):
self.bins = {}
self.binNum = binNum
def add(self, id, item):
# add a new archive to the clustering
if id in self.bins: # there is an archive from this group already
storedItem = self.bins[id]
if storedItem.time < item.time: # act item is newer then the stored one,
self.bins[id] = item # replace that
else:
self.bins[id] = item # there wasn't archive for this group till now
keys = self.bins.keys()
keys.sort()
for id in keys[:-self.binNum]: # keep the binNum newest ones
del self.bins[id]
def getPaths(self):
return [item.path for item in self.bins.values()]
def getBinTops(sourceArray, binNum, clusterFunction):
# Create groups from the archives by the clusterFunction
    # Return the newest archive from each group, for the newest binNum groups
binStore = binStoreNewest(binNum)
for item in sourceArray:
binStore.add(clusterFunction(item), item)
return binStore.getPaths()
if __name__ == '__main__':
# Example usage
if len(sys.argv) >= 2:
files = sys.argv[1:]
else:
files = glob.glob("./data/*")
archives = [archive(filename) for filename in files]
daily = getBinTops(archives, 7, lambda item: item.day)
weekly = getBinTops(archives, 4, lambda item: item.week)
monthly = getBinTops(archives, 12, lambda item: item.month)
yearly = getBinTops(archives, 10, lambda item: item.year)
keepPaths = daily + weekly + monthly + yearly
for item in archives:
if item.path not in keepPaths:
item.rm()
| unlicense |
ehirt/odoo | addons/board/__init__.py | 439 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rghe/ansible | test/units/modules/network/netscaler/test_netscaler_cs_policy.py | 18 | 12568 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
import sys
if sys.version_info[:2] != (2, 6):
import requests
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
class TestNetscalerCSPolicyModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.cs': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.cs.cspolicy': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def set_module_state(self, state):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state=state,
))
def setUp(self):
super(TestNetscalerCSPolicyModule, self).setUp()
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
def tearDown(self):
super(TestNetscalerCSPolicyModule, self).tearDown()
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
self.set_module_state('present')
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_cs_policy
self.module = netscaler_cs_policy
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_cs_policy.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_cs_policy.nitro_exception', MockException):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_create_non_existing_cs_policy(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_update_cs_policy_when_cs_policy_differs(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, True])
policy_identical_mock = Mock(side_effect=[False, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
policy_identical=policy_identical_mock,
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'], msg='Change not recorded')
def test_no_change_to_module_when_all_identical(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, True])
policy_identical_mock = Mock(side_effect=[True, True])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
policy_identical=policy_identical_mock,
ensure_feature_is_enabled=Mock(),
nitro_exception=self.MockException,
):
self.module = netscaler_cs_policy
result = self.exited()
self.assertFalse(result['changed'], msg='Erroneous changed status update')
def test_absent_operation(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[True, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'], msg='Changed status not set correctly')
def test_absent_operation_no_change(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
cs_policy_mock = MagicMock()
attrs = {
'diff_object.return_value': {},
}
cs_policy_mock.configure_mock(**attrs)
m = MagicMock(return_value=cs_policy_mock)
policy_exists_mock = Mock(side_effect=[False, False])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
ConfigProxy=m,
policy_exists=policy_exists_mock,
nitro_exception=self.MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.exited()
cs_policy_mock.assert_not_called()
self.assertFalse(result['changed'], msg='Changed status not set correctly')
def test_graceful_nitro_exception_operation_present(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
policy_exists=m,
ensure_feature_is_enabled=Mock(),
nitro_exception=MockException
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation present'
)
def test_graceful_nitro_exception_operation_absent(self):
self.set_module_state('absent')
from ansible.modules.network.netscaler import netscaler_cs_policy
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
policy_exists=m,
nitro_exception=MockException,
ensure_feature_is_enabled=Mock(),
):
self.module = netscaler_cs_policy
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
def test_ensure_feature_is_enabled_called(self):
self.set_module_state('present')
from ansible.modules.network.netscaler import netscaler_cs_policy
client_mock = Mock()
ensure_feature_is_enabled_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_policy',
get_nitro_client=Mock(return_value=client_mock),
policy_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
ensure_feature_is_enabled=ensure_feature_is_enabled_mock,
):
self.module = netscaler_cs_policy
result = self.exited()
ensure_feature_is_enabled_mock.assert_has_calls([call(client_mock, 'CS')])
| gpl-3.0 |
cloudera/ibis | ibis/tests/expr/test_pipe.py | 3 | 1763 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import ibis
@pytest.fixture
def pipe_table():
return ibis.table(
[
('key1', 'string'),
('key2', 'string'),
('key3', 'string'),
('value', 'double'),
],
'foo_table',
)
def test_pipe_positional_args(pipe_table):
def my_func(data, foo, bar):
return data[bar] + foo
result = pipe_table.pipe(my_func, 4, 'value')
expected = pipe_table['value'] + 4
assert result.equals(expected)
def test_pipe_keyword_args(pipe_table):
def my_func(data, foo=None, bar=None):
return data[bar] + foo
result = pipe_table.pipe(my_func, foo=4, bar='value')
expected = pipe_table['value'] + 4
assert result.equals(expected)
def test_pipe_pass_to_keyword(pipe_table):
def my_func(x, y, data=None):
return data[x] + y
result = pipe_table.pipe((my_func, 'data'), 'value', 4)
expected = pipe_table['value'] + 4
assert result.equals(expected)
def test_call_pipe_equivalence(pipe_table):
result = pipe_table(lambda x: x['key1'].cast('double').sum())
expected = pipe_table.key1.cast('double').sum()
assert result.equals(expected)
| apache-2.0 |
r2t2sdr/r2t2 | u-boot/tools/buildman/toolchain.py | 5 | 8510 | # Copyright (c) 2012 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import re
import glob
import os
import bsettings
import command
class Toolchain:
"""A single toolchain
Public members:
gcc: Full path to C compiler
path: Directory path containing C compiler
cross: Cross compile string, e.g. 'arm-linux-'
arch: Architecture of toolchain as determined from the first
component of the filename. E.g. arm-linux-gcc becomes arm
"""
def __init__(self, fname, test, verbose=False):
"""Create a new toolchain object.
Args:
fname: Filename of the gcc component
test: True to run the toolchain to test it
"""
self.gcc = fname
self.path = os.path.dirname(fname)
self.cross = os.path.basename(fname)[:-3]
pos = self.cross.find('-')
self.arch = self.cross[:pos] if pos != -1 else 'sandbox'
env = self.MakeEnvironment()
# As a basic sanity check, run the C compiler with --version
cmd = [fname, '--version']
if test:
result = command.RunPipe([cmd], capture=True, env=env,
raise_on_error=False)
self.ok = result.return_code == 0
if verbose:
print 'Tool chain test: ',
if self.ok:
print 'OK'
else:
print 'BAD'
print 'Command: ', cmd
print result.stdout
print result.stderr
else:
self.ok = True
self.priority = self.GetPriority(fname)
def GetPriority(self, fname):
"""Return the priority of the toolchain.
Toolchains are ranked according to their suitability by their
filename prefix.
Args:
fname: Filename of toolchain
Returns:
Priority of toolchain, 0=highest, 20=lowest.
"""
priority_list = ['-elf', '-unknown-linux-gnu', '-linux',
'-none-linux-gnueabi', '-uclinux', '-none-eabi',
'-gentoo-linux-gnu', '-linux-gnueabi', '-le-linux', '-uclinux']
for prio in range(len(priority_list)):
if priority_list[prio] in fname:
return prio
return prio
def MakeEnvironment(self):
"""Returns an environment for using the toolchain.
        This takes the current environment, adds CROSS_COMPILE and
augments PATH so that the toolchain will operate correctly.
"""
env = dict(os.environ)
env['CROSS_COMPILE'] = self.cross
env['PATH'] += (':' + self.path)
return env
class Toolchains:
"""Manage a list of toolchains for building U-Boot
We select one toolchain for each architecture type
Public members:
toolchains: Dict of Toolchain objects, keyed by architecture name
paths: List of paths to check for toolchains (may contain wildcards)
"""
def __init__(self):
self.toolchains = {}
self.paths = []
toolchains = bsettings.GetItems('toolchain')
if not toolchains:
print ("Warning: No tool chains - please add a [toolchain] section"
" to your buildman config file %s. See README for details" %
bsettings.config_fname)
for name, value in toolchains:
if '*' in value:
self.paths += glob.glob(value)
else:
self.paths.append(value)
self._make_flags = dict(bsettings.GetItems('make-flags'))
def Add(self, fname, test=True, verbose=False):
"""Add a toolchain to our list
We select the given toolchain as our preferred one for its
architecture if it is a higher priority than the others.
Args:
fname: Filename of toolchain's gcc driver
test: True to run the toolchain to test it
"""
toolchain = Toolchain(fname, test, verbose)
add_it = toolchain.ok
if toolchain.arch in self.toolchains:
add_it = (toolchain.priority <
self.toolchains[toolchain.arch].priority)
if add_it:
self.toolchains[toolchain.arch] = toolchain
def Scan(self, verbose):
"""Scan for available toolchains and select the best for each arch.
        We look for all the toolchains we can find, figure out the
architecture for each, and whether it works. Then we select the
highest priority toolchain for each arch.
Args:
verbose: True to print out progress information
"""
if verbose: print 'Scanning for tool chains'
for path in self.paths:
if verbose: print " - scanning path '%s'" % path
for subdir in ['.', 'bin', 'usr/bin']:
dirname = os.path.join(path, subdir)
if verbose: print " - looking in '%s'" % dirname
for fname in glob.glob(dirname + '/*gcc'):
if verbose: print " - found '%s'" % fname
self.Add(fname, True, verbose)
def List(self):
"""List out the selected toolchains for each architecture"""
print 'List of available toolchains (%d):' % len(self.toolchains)
if len(self.toolchains):
for key, value in sorted(self.toolchains.iteritems()):
print '%-10s: %s' % (key, value.gcc)
else:
print 'None'
def Select(self, arch):
"""Returns the toolchain for a given architecture
Args:
args: Name of architecture (e.g. 'arm', 'ppc_8xx')
returns:
toolchain object, or None if none found
"""
for name, value in bsettings.GetItems('toolchain-alias'):
if arch == name:
arch = value
if not arch in self.toolchains:
raise ValueError, ("No tool chain found for arch '%s'" % arch)
return self.toolchains[arch]
def ResolveReferences(self, var_dict, args):
"""Resolve variable references in a string
This converts ${blah} within the string to the value of blah.
This function works recursively.
Args:
var_dict: Dictionary containing variables and their values
args: String containing make arguments
Returns:
Resolved string
>>> bsettings.Setup()
>>> tcs = Toolchains()
>>> tcs.Add('fred', False)
>>> var_dict = {'oblique' : 'OBLIQUE', 'first' : 'fi${second}rst', \
'second' : '2nd'}
>>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set')
'this=OBLIQUE_set'
>>> tcs.ResolveReferences(var_dict, 'this=${oblique}_set${first}nd')
'this=OBLIQUE_setfi2ndrstnd'
"""
        re_var = re.compile(r'(\$\{[a-z0-9A-Z]{1,}\})')
while True:
m = re_var.search(args)
if not m:
break
lookup = m.group(0)[2:-1]
value = var_dict.get(lookup, '')
args = args[:m.start(0)] + value + args[m.end(0):]
return args
def GetMakeArguments(self, board):
"""Returns 'make' arguments for a given board
The flags are in a section called 'make-flags'. Flags are named
after the target they represent, for example snapper9260=TESTING=1
will pass TESTING=1 to make when building the snapper9260 board.
References to other boards can be added in the string also. For
example:
[make-flags]
at91-boards=ENABLE_AT91_TEST=1
snapper9260=${at91-boards} BUILD_TAG=442
snapper9g45=${at91-boards} BUILD_TAG=443
This will return 'ENABLE_AT91_TEST=1 BUILD_TAG=442' for snapper9260
and 'ENABLE_AT91_TEST=1 BUILD_TAG=443' for snapper9g45.
A special 'target' variable is set to the board target.
Args:
board: Board object for the board to check.
Returns:
'make' flags for that board, or '' if none
"""
self._make_flags['target'] = board.target
arg_str = self.ResolveReferences(self._make_flags,
self._make_flags.get(board.target, ''))
args = arg_str.split(' ')
i = 0
while i < len(args):
if not args[i]:
del args[i]
else:
i += 1
return args
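# Usage sketch (illustrative only, not part of the original file): buildman
# drives this class roughly as follows; the 'arm' architecture is just an
# example of what a scanned toolchain might provide.
#
#   toolchains = Toolchains()
#   toolchains.Scan(verbose=False)
#   tc = toolchains.Select('arm')
#   env = tc.MakeEnvironment()   # adds CROSS_COMPILE and extends PATH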
| gpl-3.0 |
borjam/exabgp | src/exabgp/bgp/message/update/nlri/vpls.py | 3 | 3647 | # encoding: utf-8
"""
vpls.py
Created by Nikita Shirokov on 2014-06-16.
Copyright (c) 2014-2017 Nikita Shirokov. All rights reserved.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import unpack
from struct import pack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message.direction import OUT
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher
def _unique():
value = 0
while True:
yield value
value += 1
unique = _unique()
@NLRI.register(AFI.l2vpn, SAFI.vpls)
class VPLS(NLRI):
# XXX: Should take AFI, SAFI and OUT.direction as parameter to match other NLRI
def __init__(self, rd, endpoint, base, offset, size):
NLRI.__init__(self, AFI.l2vpn, SAFI.vpls)
self.action = OUT.ANNOUNCE
self.nexthop = None
self.rd = rd
self.base = base
self.offset = offset
self.size = size
self.endpoint = endpoint
self.unique = next(unique)
def feedback(self, action):
if self.nexthop is None and action == OUT.ANNOUNCE:
return 'vpls nlri next-hop missing'
if self.endpoint is None:
return 'vpls nlri endpoint missing'
if self.base is None:
return 'vpls nlri base missing'
if self.offset is None:
return 'vpls nlri offset missing'
if self.size is None:
return 'vpls nlri size missing'
if self.rd is None:
return 'vpls nlri route-distinguisher missing'
if self.base > (0xFFFFF - self.size): # 20 bits, 3 bytes
return 'vpls nlri size inconsistency'
return ''
def assign(self, name, value):
setattr(self, name, value)
def pack_nlri(self, negotiated=None):
return (
b'\x00\x11' # pack('!H',17)
+ self.rd.pack()
+ pack('!HHH', self.endpoint, self.offset, self.size)
+ pack('!L', (self.base << 4) | 0x1)[1:] # setting the bottom of stack, should we ?
)
# XXX: FIXME: we need an unique key here.
# XXX: What can we use as unique key ?
def json(self, compact=None):
content = ', '.join(
[
self.rd.json(),
'"endpoint": %s' % self.endpoint,
'"base": %s' % self.base,
'"offset": %s' % self.offset,
'"size": %s' % self.size,
]
)
return '{ %s }' % (content)
def extensive(self):
return "vpls%s endpoint %s base %s offset %s size %s %s" % (
self.rd,
self.endpoint,
self.base,
self.offset,
self.size,
'' if self.nexthop is None else 'next-hop %s' % self.nexthop,
)
def __str__(self):
return self.extensive()
@classmethod
def unpack_nlri(cls, afi, safi, bgp, action, addpath):
# label is 20bits, stored using 3 bytes, 24 bits
(length,) = unpack('!H', bgp[0:2])
if len(bgp) != length + 2:
raise Notify(3, 10, 'l2vpn vpls message length is not consistent with encoded bgp')
rd = RouteDistinguisher(bgp[2:10])
endpoint, offset, size = unpack('!HHH', bgp[10:16])
base = unpack('!L', b'\x00' + bgp[16:19])[0] >> 4
nlri = cls(rd, endpoint, base, offset, size)
nlri.action = action
# nlri.nexthop = IP.unpack(nexthop)
return nlri, bgp[19:]
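# Usage sketch (illustrative only, not part of the original class): the route
# distinguisher is built from its 8 packed bytes, matching how unpack_nlri()
# consumes it above; the RD and label values shown are hypothetical.
#
#   rd = RouteDistinguisher(b'\x00\x00\xfd\xe8\x00\x00\x00\x64')  # 65000:100
#   nlri = VPLS(rd, endpoint=1, base=262145, offset=1, size=8)
#   wire = nlri.pack_nlri()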
| bsd-3-clause |
mrchapp/meta-openembedded | meta-oe/lib/oeqa/selftest/cases/meta_oe_sources.py | 4 | 1206 | import os
import re
import glob as g
import shutil
import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_bb_vars
class MetaOESourceMirroring(OESelftestTestCase):
# Can we download everything from the OpenEmbedded Sources Mirror over http only
def test_oe_source_mirror(self):
self.write_config("""
BB_ALLOWED_NETWORKS = " sources.openembedded.org"
MIRRORS = ""
DL_DIR = "${TMPDIR}/test_oe_downloads"
PREMIRRORS = "\\
bzr://.*/.* http://sources.openembedded.org/ \\n \\
cvs://.*/.* http://sources.openembedded.org/ \\n \\
git://.*/.* http://sources.openembedded.org/ \\n \\
gitsm://.*/.* http://sources.openembedded.org/ \\n \\
hg://.*/.* http://sources.openembedded.org/ \\n \\
osc://.*/.* http://sources.openembedded.org/ \\n \\
p4://.*/.* http://sources.openembedded.org/ \\n \\
svn://.*/.* http://sources.openembedded.org/ \\n \\
ftp://.*/.* http://sources.openembedded.org/ \\n \\
http://.*/.* http://sources.openembedded.org/ \\n \\
https://.*/.* http://sources.openembedded.org/ \\n"
""")
bitbake("world --runall fetch")
| mit |
pepeportela/edx-platform | common/djangoapps/util/model_utils.py | 6 | 7263 | """
Utilities for django models.
"""
import re
import unicodedata
from django.conf import settings
from django.dispatch import Signal
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django_countries.fields import Country
from eventtracking import tracker
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
USER_SETTINGS_CHANGED_EVENT_NAME = u'edx.user.settings.changed'
# Used to signal a field value change
USER_FIELD_CHANGED = Signal(providing_args=["user", "table", "setting", "old_value", "new_value"])
def get_changed_fields_dict(instance, model_class):
"""
Helper method for tracking field changes on a model.
Given a model instance and class, return a dict whose keys are that
instance's fields which differ from the last saved ones and whose values
are the old values of those fields. Related fields are not considered.
Args:
instance (Model instance): the model instance with changes that are
being tracked
model_class (Model class): the class of the model instance we are
tracking
Returns:
dict: a mapping of field names to current database values of those
fields, or an empty dict if the model is new
"""
try:
old_model = model_class.objects.get(pk=instance.pk)
except model_class.DoesNotExist:
# Object is new, so fields haven't technically changed. We'll return
# an empty dict as a default value.
return {}
else:
# We want to compare all of the scalar fields on the model, but none of
# the relations.
field_names = [f.name for f in model_class._meta.get_fields() if not f.is_relation] # pylint: disable=protected-access
changed_fields = {
field_name: getattr(old_model, field_name) for field_name in field_names
if getattr(old_model, field_name) != getattr(instance, field_name)
}
return changed_fields
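# Minimal usage sketch (the model name and receiver are hypothetical, not part
# of this module): cache the diff in a pre_save handler so that
# emit_field_changed_events() can read it later:
#
#     @receiver(pre_save, sender=UserProfile)
#     def _cache_changes(sender, instance, **kwargs):
#         instance._changed_fields = get_changed_fields_dict(instance, sender)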
def emit_field_changed_events(instance, user, db_table, excluded_fields=None, hidden_fields=None):
"""Emits a settings changed event for each field that has changed.
Note that this function expects that a `_changed_fields` dict has been set
    as an attribute on `instance` (see `get_changed_fields_dict`).
Args:
instance (Model instance): the model instance that is being saved
user (User): the user that this instance is associated with
db_table (str): the name of the table that we're modifying
excluded_fields (list): a list of field names for which events should
not be emitted
hidden_fields (list): a list of field names specifying fields whose
values should not be included in the event (None will be used
instead)
Returns:
None
"""
def clean_field(field_name, value):
"""
Prepare a field to be emitted in a JSON serializable format. If
`field_name` is a hidden field, return None.
"""
if field_name in hidden_fields:
return None
# Country is not JSON serializable. Return the country code.
if isinstance(value, Country):
if value.code:
return value.code
else:
return None
return value
excluded_fields = excluded_fields or []
hidden_fields = hidden_fields or []
changed_fields = getattr(instance, '_changed_fields', {})
for field_name in changed_fields:
if field_name not in excluded_fields:
old_value = clean_field(field_name, changed_fields[field_name])
new_value = clean_field(field_name, getattr(instance, field_name))
emit_setting_changed_event(user, db_table, field_name, old_value, new_value)
# Remove the now inaccurate _changed_fields attribute.
if hasattr(instance, '_changed_fields'):
del instance._changed_fields
def truncate_fields(old_value, new_value):
"""
Truncates old_value and new_value for analytics event emission if necessary.
Args:
old_value(obj): the value before the change
new_value(obj): the new value being saved
Returns:
a dictionary with the following fields:
'old': the truncated old value
'new': the truncated new value
'truncated': the list of fields that have been truncated
"""
# Compute the maximum value length so that two copies can fit into the maximum event size
# in addition to all the other fields recorded.
max_value_length = settings.TRACK_MAX_EVENT / 4
serialized_old_value, old_was_truncated = _get_truncated_setting_value(old_value, max_length=max_value_length)
serialized_new_value, new_was_truncated = _get_truncated_setting_value(new_value, max_length=max_value_length)
truncated_values = []
if old_was_truncated:
truncated_values.append("old")
if new_was_truncated:
truncated_values.append("new")
return {'old': serialized_old_value, 'new': serialized_new_value, 'truncated': truncated_values}
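# For example, if only the old value exceeds the limit, the result looks like
# {'old': '<truncated text>', 'new': 'short value', 'truncated': ['old']}
# (illustrative; the actual cutoff depends on settings.TRACK_MAX_EVENT).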
def emit_setting_changed_event(user, db_table, setting_name, old_value, new_value):
"""Emits an event for a change in a setting.
Args:
user (User): the user that this setting is associated with.
db_table (str): the name of the table that we're modifying.
setting_name (str): the name of the setting being changed.
old_value (object): the value before the change.
new_value (object): the new value being saved.
Returns:
None
"""
truncated_fields = truncate_fields(old_value, new_value)
truncated_fields['setting'] = setting_name
truncated_fields['user_id'] = user.id
truncated_fields['table'] = db_table
tracker.emit(
USER_SETTINGS_CHANGED_EVENT_NAME,
truncated_fields
)
# Announce field change
USER_FIELD_CHANGED.send(sender=None, user=user, table=db_table, setting=setting_name,
old_value=old_value, new_value=new_value)
def _get_truncated_setting_value(value, max_length=None):
"""
Returns the truncated form of a setting value.
Returns:
truncated_value (object): the possibly truncated version of the value.
was_truncated (bool): returns true if the serialized value was truncated.
"""
if isinstance(value, basestring) and max_length is not None and len(value) > max_length:
return value[0:max_length], True
else:
return value, False
# Taken from Django 1.8 source code because it's not supported in 1.4
def slugify(value):
"""Converts value into a string suitable for readable URLs.
Converts to ASCII. Converts spaces to hyphens. Removes characters that
aren't alphanumerics, underscores, or hyphens. Converts to lowercase.
Also strips leading and trailing whitespace.
Args:
value (string): String to slugify.
"""
value = force_unicode(value)
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub(r'[-\s]+', '-', value))
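# Example (illustrative): slugify(u"Héllo, Wörld!  ") returns u'hello-world' --
# accents are stripped by the NFKD/ASCII step, punctuation is removed, and the
# remaining whitespace collapses to single hyphens.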
| agpl-3.0 |
wweiradio/django | tests/flatpages_tests/test_forms.py | 165 | 4569 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.utils import translation
@modify_settings(INSTALLED_APPS={'append': ['django.contrib.flatpages', ]})
@override_settings(SITE_ID=1)
class FlatpageAdminFormTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
def setUp(self):
# Site fields cache needs to be cleared after flatpages is added to
# INSTALLED_APPS
Site._meta._expire_cache()
self.form_data = {
'title': "A test page",
'content': "This is a test",
'sites': [settings.SITE_ID],
}
def test_flatpage_admin_form_url_validation(self):
"The flatpage admin form correctly validates urls"
self.assertTrue(FlatpageForm(data=dict(url='/new_flatpage/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.special~chars/', **self.form_data)).is_valid())
self.assertTrue(FlatpageForm(data=dict(url='/some.very_special~chars-here/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a space/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a % char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ! char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a & char/', **self.form_data)).is_valid())
self.assertFalse(FlatpageForm(data=dict(url='/a ? char/', **self.form_data)).is_valid())
def test_flatpage_requires_leading_slash(self):
form = FlatpageForm(data=dict(url='no_leading_slash/', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a leading slash."])
@override_settings(APPEND_SLASH=True,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_requires_trailing_slash_with_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
with translation.override('en'):
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['url'], ["URL is missing a trailing slash."])
@override_settings(APPEND_SLASH=False,
MIDDLEWARE_CLASSES=['django.middleware.common.CommonMiddleware'])
def test_flatpage_doesnt_requires_trailing_slash_without_append_slash(self):
form = FlatpageForm(data=dict(url='/no_trailing_slash', **self.form_data))
self.assertTrue(form.is_valid())
def test_flatpage_admin_form_url_uniqueness_validation(self):
"The flatpage admin form correctly enforces url uniqueness among flatpages of the same site"
data = dict(url='/myflatpage1/', **self.form_data)
FlatpageForm(data=data).save()
f = FlatpageForm(data=data)
with translation.override('en'):
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'__all__': ['Flatpage with url /myflatpage1/ already exists for site example.com']})
def test_flatpage_admin_form_edit(self):
"""
Existing flatpages can be edited in the admin form without triggering
the url-uniqueness validation.
"""
existing = FlatPage.objects.create(
url="/myflatpage1/", title="Some page", content="The content")
existing.sites.add(settings.SITE_ID)
data = dict(url='/myflatpage1/', **self.form_data)
f = FlatpageForm(data=data, instance=existing)
self.assertTrue(f.is_valid(), f.errors)
updated = f.save()
self.assertEqual(updated.title, "A test page")
def test_flatpage_nosites(self):
data = dict(url='/myflatpage1/', **self.form_data)
data.update({'sites': ''})
f = FlatpageForm(data=data)
self.assertFalse(f.is_valid())
self.assertEqual(
f.errors,
{'sites': [translation.ugettext('This field is required.')]})
| bsd-3-clause |
gpetretto/monty | tests/test_os.py | 2 | 1196 | __author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2014, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = '[email protected]'
__date__ = '1/24/14'
import unittest
import os
from monty.os.path import which, zpath
from monty.os import cd
test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
class PathTest(unittest.TestCase):
def test_which(self):
py = which("python")
self.assertEqual(os.path.basename(py), "python")
def test_zpath(self):
fullzpath = zpath(os.path.join(test_dir, "myfile_gz"))
self.assertEqual(os.path.join(test_dir, "myfile_gz.gz"), fullzpath)
class CdTest(unittest.TestCase):
def test_cd(self):
with cd(test_dir):
self.assertTrue(os.path.exists("empty_file.txt"))
self.assertFalse(os.path.exists("empty_file.txt"))
def test_cd_exception(self):
try:
with cd(test_dir):
self.assertTrue(os.path.exists("empty_file.txt"))
raise RuntimeError()
except:
pass
self.assertFalse(os.path.exists("empty_file.txt"))
if __name__ == "__main__":
unittest.main()
| mit |
double-y/django | tests/forms_tests/tests/test_validators.py | 261 | 1540 | from __future__ import unicode_literals
import re
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
]
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-zA-Z]*$',
message="Letters only.",
)
]
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex='^[a-z]*$',
message="Letters only.",
flags=re.IGNORECASE,
)
]
)
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
form = UserForm({'full_name': 'not int nor mail', 'string': '2 is not correct', 'ignore_case_string': "IgnORE Case strIng"})
self.assertRaises(ValidationError, form.fields['full_name'].clean, 'not int nor mail')
try:
form.fields['full_name'].clean('not int nor mail')
except ValidationError as e:
self.assertEqual(2, len(e.messages))
self.assertFalse(form.is_valid())
        self.assertEqual(form.errors['string'], ["Letters only."])
        self.assertEqual(form.errors['ignore_case_string'], ["Letters only."])
| bsd-3-clause |
joshuajnoble/ofxPython | example_Callbacks/bin/data/openframeworks_extra.py | 9 | 3244 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_openframeworks_extra', [dirname(__file__)])
except ImportError:
import _openframeworks_extra
return _openframeworks_extra
if fp is not None:
try:
_mod = imp.load_module('_openframeworks_extra', fp, pathname, description)
finally:
fp.close()
return _mod
_openframeworks_extra = swig_import_helper()
del swig_import_helper
else:
import _openframeworks_extra
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class CallBack(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CallBack, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CallBack, name)
__repr__ = _swig_repr
def _call(self): return _openframeworks_extra.CallBack__call(self)
__swig_destroy__ = _openframeworks_extra.delete_CallBack
__del__ = lambda self : None;
def call(self,*args,**kwargs):
CallBack._args = args
CallBack._kwargs = kwargs
self._call()
CallBack._args = None
CallBack._kwargs = None
def __init__(self):
this = _openframeworks_extra.new_CallBack()
try: self.this.append(this)
except: self.this = this
CallBack_swigregister = _openframeworks_extra.CallBack_swigregister
CallBack_swigregister(CallBack)
def _getCallBackPointer():
return _openframeworks_extra._getCallBackPointer()
_getCallBackPointer = _openframeworks_extra._getCallBackPointer
# This file is compatible with both classic and new-style classes.
| mit |
eicher31/compassion-switzerland | partner_communication_switzerland/models/partner_communication.py | 3 | 28550 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <[email protected]>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import base64
import time
import logging
import re
from ..wizards.generate_communication_wizard import SMS_CHAR_LIMIT, SMS_COST
from math import ceil
from collections import OrderedDict
from datetime import date, datetime
from io import BytesIO
from dateutil.relativedelta import relativedelta
from odoo.addons.sponsorship_compassion.models.product import GIFT_REF
from odoo import api, models, _, fields
from odoo.exceptions import MissingError, UserError
_logger = logging.getLogger(__name__)
try:
from pyPdf import PdfFileWriter, PdfFileReader
from bs4 import BeautifulSoup
except ImportError:
_logger.warning("Please install pypdf and bs4 for using the module")
class PartnerCommunication(models.Model):
_inherit = 'partner.communication.job'
event_id = fields.Many2one('crm.event.compassion', 'Event')
ambassador_id = fields.Many2one('res.partner', 'Ambassador')
currency_id = fields.Many2one('res.currency', compute='_compute_currency')
utm_campaign_id = fields.Many2one('utm.campaign')
sms_cost = fields.Float()
sms_provider_id = fields.Many2one(
'sms.provider', 'SMS Provider',
default=lambda self: self.env.ref('sms_939.large_account_id', False),
readonly=False)
@api.model
def send_mode_select(self):
modes = super(PartnerCommunication, self).send_mode_select()
modes.append(('sms', _('SMS')))
return modes
@api.multi
def _compute_currency(self):
chf = self.env.ref('base.CHF')
for wizard in self:
wizard.currency_id = chf.id
def get_correspondence_attachments(self):
"""
Include PDF of letters if the send_mode is to print the letters.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
# Report is used for print configuration
report = 'report_compassion.b2s_letter'
letters = self.get_objects()
if self.send_mode == 'physical':
for letter in self.get_objects():
try:
attachments[letter.file_name] = [
report, self._convert_pdf(letter.letter_image)]
except MissingError:
_logger.warn("Missing letter image", exc_info=True)
self.send_mode = False
self.auto_send = False
self.message_post(
_('The letter image is missing!'), _("Missing letter"))
continue
else:
# Attach directly a zip in the letters
letters.attach_zip()
return attachments
def get_birthday_bvr(self):
"""
Attach birthday gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects().filtered(
lambda s: not s.birthday_paid)
gifts_to = sponsorships[:1].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
birthday_gift = self.env['product.product'].search([
('default_code', '=', GIFT_REF[0])], limit=1)
attachments = sponsorships.get_bvr_gift_attachment(
birthday_gift, background)
return attachments
def get_graduation_bvr(self):
"""
Attach graduation gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects()
graduation = self.env['product.product'].search([
('default_code', '=', GIFT_REF[4])], limit=1)
gifts_to = sponsorships[0].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
attachments = sponsorships.get_bvr_gift_attachment(
graduation, background)
return attachments
def get_family_slip_attachment(self):
"""
Attach family gift slip with background for sending by e-mail
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = dict()
background = self.send_mode and 'physical' not in self.send_mode
sponsorships = self.get_objects()
family = self.env['product.product'].search([
('default_code', '=', GIFT_REF[2])], limit=1)
gifts_to = sponsorships[0].gift_partner_id
if sponsorships and gifts_to == self.partner_id:
attachments = sponsorships.get_bvr_gift_attachment(
family, background)
return attachments
def get_reminder_bvr(self):
"""
Attach sponsorship due payment slip with background for sending by
e-mail.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
sponsorships = self.get_objects()
# Verify big due periods
if len(sponsorships.mapped('months_due')) > 3:
self.need_call = 'before_sending'
payment_mode = sponsorships.with_context(lang='en_US').mapped(
'payment_mode_id.name')[0]
# LSV-DD Waiting reminders special case
if 'Waiting Reminder' in self.config_id.name and (
'LSV' in payment_mode or 'Postfinance' in payment_mode):
if self.partner_id.bank_ids:
# We received the bank info but withdrawal didn't work.
# Mark to call in order to verify the situation.
self.need_call = 'before_sending'
else:
# Don't put payment slip if we just wait the authorization form
return dict()
        # Set the sponsorship product so the payment slip gets printed when the communication is sent physically.
if self.send_mode and 'physical' in self.send_mode:
self.product_id = self.env['product.product'].search([
('default_code', '=', 'sponsorship')], limit=1)
return dict()
# In other cases, attach the payment slip.
report_name = 'report_compassion.bvr_due'
return {
_('sponsorship due.pdf'): [
report_name,
base64.b64encode(self.env['report'].get_pdf(
sponsorships.ids, report_name,
data={'background': True, 'doc_ids': sponsorships.ids}
))
]
}
def get_label_from_sponsorship(self):
"""
Attach sponsorship labels. Used from communication linked to children.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
sponsorships = self.get_objects()
return self.get_label_attachment(sponsorships)
def get_label_attachment(self, sponsorships=False):
"""
Attach sponsorship labels. Used from communication linked to children.
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
if not sponsorships:
sponsorships = self.env['recurring.contract']
children = self.get_objects()
for child in children:
sponsorships += child.sponsorship_ids[0]
attachments = dict()
label_print = self.env['label.print'].search([
('name', '=', 'Sponsorship Label')], limit=1)
label_brand = self.env['label.brand'].search([
('brand_name', '=', 'Herma A4')], limit=1)
label_format = self.env['label.config'].search([
('name', '=', '4455 SuperPrint WeiB')], limit=1)
label_wizard = self.env['label.print.wizard'].with_context({
'active_ids': sponsorships.ids,
'active_model': 'recurring.contract',
'label_print': label_print.id,
'must_skip_send_to_printer': True
}).create({
'brand_id': label_brand.id,
'config_id': label_format.id,
'number_of_labels': 33
})
label_data = label_wizard.get_report_data()
report_name = 'label.report_label'
attachments[_('sponsorship labels.pdf')] = [
report_name,
base64.b64encode(
label_wizard.env['report'].get_pdf(
label_wizard.ids, report_name, data=label_data))
]
return attachments
def get_child_picture_attachment(self):
"""
Attach child pictures to communication. It directly attach them
to the communication if sent by e-mail and therefore does
return an empty dictionary.
:return: dict {}
"""
self.ensure_one()
res = dict()
if self.send_mode and 'physical' not in self.send_mode:
# Prepare attachments in case the communication is sent by e-mail
children = self.get_objects()
attachments = self.env['ir.attachment']
for child in children:
name = child.local_id + ' ' + child.last_photo_date + '.jpg'
attachments += attachments.create({
'name': name,
'datas_fname': name,
'res_model': self._name,
'res_id': self.id,
'datas': child.fullshot,
})
self.with_context(no_print=True).ir_attachment_ids = attachments
else:
self.ir_attachment_ids = False
return res
def get_yearly_payment_slips_2bvr(self):
return self.get_yearly_payment_slips(bv_number=2)
def get_yearly_payment_slips(self, bv_number=3):
"""
Attach payment slips
:param bv_number number of BV on a page (switch between 2BV/3BV page)
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
assert bv_number in (2, 3)
sponsorships = self.get_objects()
payment_mode_bvr = self.env.ref(
'sponsorship_switzerland.payment_mode_bvr')
attachments = dict()
# IF payment mode is BVR and partner is paying
# attach sponsorship payment slips
pay_bvr = sponsorships.filtered(
lambda s: s.payment_mode_id == payment_mode_bvr and
s.partner_id == self.partner_id)
report_obj = self.env['report']
if pay_bvr and pay_bvr.must_pay_next_year():
today = date.today()
date_start = today.replace(today.year + 1, 1, 1)
date_stop = date_start.replace(month=12, day=31)
report_name = 'report_compassion.{}bvr_sponsorship'.format(
bv_number)
attachments.update({
_('sponsorship payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
pay_bvr.ids, report_name,
data={
'doc_ids': pay_bvr.ids,
'date_start': fields.Date.to_string(date_start),
'date_stop': fields.Date.to_string(date_stop),
'background': self.send_mode != 'physical'
}
))
]
})
# Attach gifts for correspondents
pays_gift = self.env['recurring.contract']
for sponsorship in sponsorships:
if sponsorship.mapped(sponsorship.send_gifts_to) == \
self.partner_id:
pays_gift += sponsorship
if pays_gift:
report_name = 'report_compassion.{}bvr_gift_sponsorship'.format(
bv_number)
product_ids = self.env['product.product'].search([
('default_code', 'in', GIFT_REF[:3])
]).ids
attachments.update({
_('sponsorship gifts.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
pays_gift.ids, report_name,
data={
'doc_ids': pays_gift.ids,
'product_ids': product_ids
}
))
]
})
return attachments
def get_childpack_attachment(self):
self.ensure_one()
lang = self.partner_id.lang
sponsorships = self.get_objects()
exit_conf = self.env.ref(
'partner_communication_switzerland.lifecycle_child_planned_exit')
if self.config_id == exit_conf and sponsorships.mapped(
'sub_sponsorship_id'):
sponsorships = sponsorships.mapped('sub_sponsorship_id')
children = sponsorships.mapped('child_id')
# Always retrieve latest information before printing dossier
children.get_infos()
report_name = 'report_compassion.childpack_small'
return {
_('child dossier.pdf'): [
report_name,
base64.b64encode(self.env['report'].get_pdf(
children.ids, report_name, data={
'lang': lang,
'is_pdf': self.send_mode != 'physical',
'type': report_name,
}))
]
}
def get_tax_receipt(self):
self.ensure_one()
res = {}
if self.send_mode == 'digital':
report_name = 'report_compassion.tax_receipt'
data = {
'doc_ids': self.partner_id.ids,
'year': self.env.context.get('year', date.today().year - 1),
'lang': self.partner_id.lang,
}
res = {
_('tax receipt.pdf'): [
report_name,
base64.b64encode(
self.env['report'].with_context(
must_skip_send_to_printer=True).get_pdf(
self.partner_id.ids, report_name, data=data))
]
}
return res
@api.multi
def send(self):
"""
- Prevent sending communication when invoices are being reconciled
- Mark B2S correspondence as read when printed.
- Postpone no money holds when reminders sent.
- Update donor tag
- Sends SMS for sms send_mode
:return: True
"""
sms_jobs = self.filtered(lambda j: j.send_mode == 'sms')
sms_jobs.send_by_sms()
other_jobs = self - sms_jobs
for job in other_jobs.filtered(lambda j: j.model in (
'recurring.contract', 'account.invoice')):
queue_job = self.env['queue.job'].search([
('channel', '=', 'root.group_reconcile'),
('state', '!=', 'done'),
], limit=1)
if queue_job:
invoices = self.env['account.invoice'].browse(
queue_job.record_ids)
if job.partner_id in invoices.mapped('partner_id'):
retry = 0
state = queue_job.state
while state != 'done' and retry < 5:
if queue_job.state == 'failed':
raise UserError(_(
"A reconcile job has failed. Please call "
"an admin for help."
))
_logger.info("Reconcile job is processing! Going in "
"sleep for five seconds...")
time.sleep(5)
state = queue_job.read(['state'])[0]['state']
retry += 1
if queue_job.state != 'done':
raise UserError(_(
"Some invoices of the partner are just being "
"reconciled now. Please wait the process to finish"
" before printing the communication."
))
super(PartnerCommunication, other_jobs).send()
b2s_printed = other_jobs.filtered(
lambda c: c.config_id.model == 'correspondence' and
c.send_mode == 'physical' and c.state == 'done')
if b2s_printed:
letters = b2s_printed.get_objects()
if letters:
letters.write({
'letter_delivered': True,
})
# No money extension
no_money_1 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_1')
no_money_2 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_2')
no_money_3 = self.env.ref('partner_communication_switzerland.'
'sponsorship_waiting_reminder_3')
settings = self.env['availability.management.settings']
first_extension = settings.get_param('no_money_hold_duration')
second_extension = settings.get_param('no_money_hold_extension')
for communication in other_jobs:
extension = False
if communication.config_id == no_money_1:
extension = first_extension + 7
elif communication.config_id == no_money_2:
extension = second_extension + 7
elif communication.config_id == no_money_3:
extension = 10
if extension:
holds = communication.get_objects().mapped('child_id.hold_id')
for hold in holds:
expiration = datetime.now() + relativedelta(days=extension)
hold.expiration_date = fields.Datetime.to_string(
expiration)
donor = self.env.ref('partner_compassion.res_partner_category_donor')
partners = other_jobs.filtered(
lambda j: j.config_id.model == 'account.invoice.line' and
donor not in j.partner_id.category_id).mapped('partner_id')
partners.write({'category_id': [(4, donor.id)]})
return True
@api.multi
def send_by_sms(self):
"""
Sends communication jobs with SMS 939 service.
:return: list of sms_texts
"""
link_pattern = re.compile(r'<a href="(.*)">(.*)</a>', re.DOTALL)
sms_medium_id = self.env.ref('sms_sponsorship.utm_medium_sms').id
sms_texts = []
for job in self.filtered('partner_mobile'):
sms_text = job.convert_html_for_sms(link_pattern, sms_medium_id)
sms_texts.append(sms_text)
sms_wizard = self.env['sms.sender.wizard'].with_context(
partner_id=job.partner_id.id).create({
'subject': job.subject,
'text': sms_text,
'sms_provider_id': job.sms_provider_id.id
})
sms_wizard.send_sms_partner()
job.write({
'state': 'done',
'sent_date': fields.Datetime.now(),
'sms_cost': ceil(
float(len(sms_text)) / SMS_CHAR_LIMIT) * SMS_COST
})
return sms_texts
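    # Cost arithmetic sketch (hypothetical numbers, the real constants come
    # from the wizard module): if SMS_CHAR_LIMIT were 160 and SMS_COST 0.08,
    # a 250-character body would be billed ceil(250 / 160.0) * 0.08 = 0.16.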
def convert_html_for_sms(self, link_pattern, sms_medium_id):
"""
Converts HTML into simple text for SMS.
First replace links with short links using Link Tracker.
Then clean HTML using BeautifulSoup library.
:param link_pattern: the regex pattern for replacing links
:param sms_medium_id: the associated utm.medium id for generated links
:return: Clean text with short links for SMS use.
"""
self.ensure_one()
source_id = self.config_id.source_id.id
def _replace_link(match):
            full_link = match.group(1).replace('&amp;', '&')
short_link = self.env['link.tracker'].create({
'url': full_link,
'campaign_id': self.utm_campaign_id.id or self.env.ref(
'partner_communication_switzerland.'
'utm_campaign_communication').id,
'medium_id': sms_medium_id,
'source_id': source_id
})
return short_link.short_url
links_converted_text = link_pattern.sub(_replace_link, self.body_html)
soup = BeautifulSoup(links_converted_text, "lxml")
return soup.get_text().strip()
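    # For instance, '<p>Hi <a href="https://example.org/give?x=1&amp;y=2">give
    # now</a></p>' becomes 'Hi <short url>', where the short URL comes from the
    # created link.tracker record (the URL shown here is purely illustrative).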
@api.multi
def open_related(self):
""" Select a better view for invoice lines. """
res = super(PartnerCommunication, self).open_related()
if self.config_id.model == 'account.invoice.line':
res['context'] = self.with_context(
tree_view_ref='sponsorship_compassion'
'.view_invoice_line_partner_tree',
group_by=False
).env.context
return res
def get_new_dossier_attachments(self):
"""
Returns pdfs for the New Dossier Communication, including:
- Sponsorship payment slips (if payment is True)
- Small Childpack
- Sponsorship labels (if correspondence is True)
- Child picture
:return: dict {attachment_name: [report_name, pdf_data]}
"""
self.ensure_one()
attachments = OrderedDict()
report_obj = self.env['report']
account_payment_mode_obj = self.env['account.payment.mode']\
.with_context(lang='en_US')
lsv_dd_modes = account_payment_mode_obj.search(
['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
permanent_order = self.env.ref(
'sponsorship_switzerland.payment_mode_permanent_order')
sponsorships = self.get_objects()
# Sponsorships included for payment slips
bv_sponsorships = sponsorships.filtered(
# 1. Needs to be payer
lambda s: s.partner_id == self.partner_id and
# 2. Permanent Order are always included
s.payment_mode_id == permanent_order or (
# 3. LSV/DD are never included
s.payment_mode_id not in lsv_dd_modes and
# 4. If already paid they are not included
not s.period_paid)
)
write_sponsorships = sponsorships.filtered(
lambda s: s.correspondent_id == self.partner_id)
# Include all active sponsorships for Permanent Order
bv_sponsorships |= bv_sponsorships\
.filtered(lambda s: s.payment_mode_id == permanent_order)\
.mapped('group_id.contract_ids').filtered(
lambda s: s.state in ('active', 'waiting'))
# Payment slips
if bv_sponsorships:
report_name = 'report_compassion.3bvr_sponsorship'
if bv_sponsorships.mapped('payment_mode_id') == permanent_order:
# One single slip is enough for permanent order.
report_name = 'report_compassion.bvr_sponsorship'
attachments.update({
_('sponsorship payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
bv_sponsorships.ids, report_name,
data={
'doc_ids': bv_sponsorships.ids,
'background': self.send_mode != 'physical'
}
))
]
})
# Childpack if not a SUB of planned exit.
lifecycle = sponsorships.mapped('parent_id.child_id.lifecycle_ids')
planned_exit = lifecycle and lifecycle[0].type == 'Planned Exit'
if not planned_exit:
attachments.update(self.get_childpack_attachment())
# Labels
if write_sponsorships:
attachments.update(self.get_label_attachment(write_sponsorships))
# Child picture
report_name = 'partner_communication_switzerland.child_picture'
child_ids = sponsorships.mapped('child_id').ids
attachments.update({
_('child picture.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
child_ids, report_name,
data={'doc_ids': child_ids}
))
]
})
# Country information
for field_office in self.get_objects().mapped(
'child_id.field_office_id'):
country_pdf = field_office.country_info_pdf
if country_pdf:
attachments.update({
field_office.name + ".pdf": [
'partner_communication_switzerland.field_office_info',
country_pdf
]
})
return attachments
def get_csp_attachment(self):
self.ensure_one()
attachments = OrderedDict()
report_obj = self.env['report']
account_payment_mode_obj = self.env['account.payment.mode']
csp = self.get_objects()
# Include all active csp for Permanent Order
if 'Permanent Order' in csp.with_context(
lang='en_US').mapped('payment_mode_id.name'):
csp += csp.mapped(
'group_id.contract_ids').filtered(
lambda s: s.state == 'active')
is_payer = self.partner_id in csp.mapped('partner_id')
make_payment_pdf = True
# LSV/DD don't need a payment slip
groups = csp.mapped('group_id')
lsv_dd_modes = account_payment_mode_obj.search(
['|', ('name', 'like', 'Direct Debit'), ('name', 'like', 'LSV')])
lsv_dd_groups = groups.filtered(
lambda r: r.payment_mode_id in lsv_dd_modes)
if len(lsv_dd_groups) == len(groups):
make_payment_pdf = False
# If partner already paid, avoid payment slip
if len(csp.filtered('period_paid')) == len(csp):
make_payment_pdf = False
# Payment slips
if is_payer and make_payment_pdf:
report_name = 'report_compassion.3bvr_sponsorship'
attachments.update({
_('csv payment slips.pdf'): [
report_name,
base64.b64encode(report_obj.get_pdf(
csp.ids, report_name,
data={
'doc_ids': csp.ids,
'background': self.send_mode != 'physical'
}
))
]
})
return attachments
def _convert_pdf(self, pdf_data):
"""
Converts all pages of PDF in A4 format if communication is
printed.
:param pdf_data: binary data of original pdf
:return: binary data of converted pdf
"""
if self.send_mode != 'physical':
return pdf_data
pdf = PdfFileReader(BytesIO(base64.b64decode(pdf_data)))
convert = PdfFileWriter()
a4_width = 594.48
a4_height = 844.32 # A4 units in PyPDF
for i in xrange(0, pdf.numPages):
# translation coordinates
tx = 0
ty = 0
page = pdf.getPage(i)
corner = [float(x) for x in page.mediaBox.getUpperRight()]
if corner[0] > a4_width or corner[1] > a4_height:
page.scaleBy(max(a4_width / corner[0], a4_height / corner[1]))
elif corner[0] < a4_width or corner[1] < a4_height:
tx = (a4_width - corner[0]) / 2
ty = (a4_height - corner[1]) / 2
convert.addBlankPage(a4_width, a4_height)
convert.getPage(i).mergeTranslatedPage(page, tx, ty)
output_stream = BytesIO()
convert.write(output_stream)
output_stream.seek(0)
return base64.b64encode(output_stream.read())
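    # In short: pages larger than the A4 box are rescaled by the larger of the
    # width/height ratios, while smaller pages keep their size and are centred
    # on the blank A4 page through the tx/ty translation computed above.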
| agpl-3.0 |
Page-David/wget-fast | configer.py | 1 | 2463 | #!/usr/bin/env python3
import urllib.parse
import requests
import queue
import os
import interface
class Download_Configer(object):
# Init download settings...
def __init__(self, url, saveto):
self.url = url
parse_result = urllib.parse.urlparse(self.url)
self.filename = self.url.split('/')[-1]
self.protocol = parse_result.scheme
self.domain = parse_result.netloc
self.saveto = saveto
self.path = os.path.join(self.saveto, self.filename)
self.max_thread = 10
self.min_block = 1000
self.down_queue = queue.Queue(self.max_thread)
self._get_url_header()
self._block_content()
self._touch_file()
    # Issue a small ranged GET request to probe partial-content support and gather file information
def _get_url_header(self):
interface.info_out('HTTP_REQUEST')
headers = {
'Range': 'bytes=0-1'
}
response = requests.get(self.url, stream = True, headers = headers)
if response.status_code == 206:
self.partital_content = True
interface.info_out('PARTITAL_SUPPORT')
self.content_length =int(response.headers['Content-Range']\
.split('/')[1])
elif response.status_code // 100 == 4:
interface.info_out('CONNECTION_ERROR', response.status_code)
elif response.status_code // 100 == 2:
self.partital_content = False
interface.info_out('PARTITAL_NOT_SUPPORT')
self.content_length = int(response.headers['Content-Length'])
interface.info_out('CONTENT_LENGTH', self.content_length)
    # Break the download into partial-content block ranges
def _block_content(self):
if self.content_length // self.max_thread > self.min_block:
self.min_block = self.content_length // self.max_thread+1
self.x = 0
while self.x < self.content_length:
if self.x+self.min_block > self.content_length:
self.down_queue.put((self.x, self.content_length-1))
else:
self.down_queue.put((self.x, self.x+self.min_block-1))
self.x += self.min_block
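        # e.g. with content_length == 2500 and the default min_block of 1000
        # (content_length // max_thread does not exceed it), the queue receives
        # the byte ranges (0, 999), (1000, 1999) and (2000, 2499).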
def _touch_file(self):
open(self.path, 'w').close()
if __name__ == '__main__':
d = Download_Configer('https://raw.githubusercontent.com/getlantern/lantern-binaries/master/lantern-installer-beta.exe',
'/home/lancaster')
while not d.down_queue.empty():
print(d.down_queue.get())
| gpl-3.0 |
lalithsuresh/QEMU-Device-State-Visualisations | scripts/simpletrace.py | 12 | 2522 | #!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/tracing.txt
import sys
import struct
import re
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
header_version = 0
trace_fmt = '=QQQQQQQQ'
trace_len = struct.calcsize(trace_fmt)
event_re = re.compile(r'(disable\s+)?([a-zA-Z0-9_]+)\(([^)]*)\).*')
def err(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def parse_events(fobj):
"""Parse a trace-events file."""
def get_argnames(args):
"""Extract argument names from a parameter list."""
return tuple(arg.split()[-1].lstrip('*') for arg in args.split(','))
events = {}
event_num = 0
for line in fobj:
m = event_re.match(line.strip())
if m is None:
continue
disable, name, args = m.groups()
events[event_num] = (name,) + get_argnames(args)
event_num += 1
return events
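# Example (illustrative trace-events line): 'qemu_malloc(size_t size) "%zu"'
# would be parsed into events[0] == ('qemu_malloc', 'size').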
def read_record(fobj):
"""Deserialize a trace record from a file."""
s = fobj.read(trace_len)
if len(s) != trace_len:
return None
return struct.unpack(trace_fmt, s)
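# Each record is eight 64-bit values in native byte order (trace_len == 64
# bytes): the event id, a nanosecond timestamp, and up to six event arguments.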
def read_trace_file(fobj):
"""Deserialize trace records from a file."""
header = read_record(fobj)
if header is None or \
header[0] != header_event_id or \
header[1] != header_magic or \
header[2] != header_version:
err('not a trace file or incompatible version')
while True:
rec = read_record(fobj)
if rec is None:
break
yield rec
class Formatter(object):
def __init__(self, events):
self.events = events
self.last_timestamp = None
def format_record(self, rec):
if self.last_timestamp is None:
self.last_timestamp = rec[1]
delta_ns = rec[1] - self.last_timestamp
self.last_timestamp = rec[1]
event = self.events[rec[0]]
fields = [event[0], '%0.3f' % (delta_ns / 1000.0)]
for i in xrange(1, len(event)):
fields.append('%s=0x%x' % (event[i], rec[i + 1]))
return ' '.join(fields)
if len(sys.argv) != 3:
err('usage: %s <trace-events> <trace-file>' % sys.argv[0])
events = parse_events(open(sys.argv[1], 'r'))
formatter = Formatter(events)
for rec in read_trace_file(open(sys.argv[2], 'rb')):
print formatter.format_record(rec)
| gpl-2.0 |
hobarrera/django | tests/responses/tests.py | 33 | 4881 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
from django.conf import settings
from django.core.cache import cache
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
def test_closed(self):
r = HttpResponseBase()
self.assertIs(r.closed, False)
r.close()
self.assertIs(r.closed, True)
def test_write(self):
r = HttpResponseBase()
self.assertIs(r.writable(), False)
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.write('asdf')
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.writelines(['asdf\n', 'qwer\n'])
def test_tell(self):
r = HttpResponseBase()
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
r.tell()
def test_setdefault(self):
"""
HttpResponseBase.setdefault() should not change an existing header
and should be case insensitive.
"""
r = HttpResponseBase()
r['Header'] = 'Value'
r.setdefault('header', 'changed')
self.assertEqual(r['header'], 'Value')
r.setdefault('x-header', 'DefaultValue')
self.assertEqual(r['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
def test_status_code(self):
resp = HttpResponse(status=503)
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_change_status_code(self):
resp = HttpResponse()
resp.status_code = 503
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_reason_phrase(self):
reason = "I'm an anarchist coffee pot on crack."
resp = HttpResponse(status=814, reason=reason)
self.assertEqual(resp.status_code, 814)
self.assertEqual(resp.reason_phrase, reason)
def test_charset_detection(self):
""" HttpResponse should parse charset from content_type."""
response = HttpResponse('ok')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(content_type='text/plain')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
def test_response_content_charset(self):
"""HttpResponse should encode based on charset."""
content = "Café :)"
utf8_content = content.encode(UTF8)
iso_content = content.encode(ISO88591)
response = HttpResponse(utf8_content)
self.assertContains(response, utf8_content)
response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content, content_type='text/plain')
self.assertContains(response, iso_content)
def test_repr(self):
response = HttpResponse(content="Café :)".encode(UTF8), status=201)
expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">'
self.assertEqual(repr(response), expected)
def test_wrap_textiowrapper(self):
content = "Café :)"
r = HttpResponse()
with io.TextIOWrapper(r, UTF8) as buf:
buf.write(content)
self.assertEqual(r.content, content.encode(UTF8))
def test_generator_cache(self):
generator = ("{}".format(i) for i in range(10))
response = HttpResponse(content=generator)
self.assertEqual(response.content, b'0123456789')
with self.assertRaises(StopIteration):
next(generator)
cache.set('my-response-key', response)
response = cache.get('my-response-key')
self.assertEqual(response.content, b'0123456789')
| bsd-3-clause |
abhiatgithub/shogun-toolbox | examples/undocumented/python_modular/mathematics_logdet.py | 29 | 2923 | #!/usr/bin/env python
from numpy import *
from scipy.io import mmread
# Loading an example sparse matrix of dimension 479x479, real, unsymmetric
mtx=mmread('../../../data/logdet/west0479.mtx')
parameter_list=[[mtx,100,60,1]]
def mathematics_logdet (matrix=mtx,max_iter_eig=1000,max_iter_lin=1000,num_samples=1):
from scipy.sparse import eye
# Create a Hermitian sparse matrix
rows=matrix.shape[0]
cols=matrix.shape[1]
A=matrix.transpose()*matrix+eye(rows, cols)
from scipy.sparse import csc_matrix
try:
from shogun.Mathematics import RealSparseMatrixOperator
from shogun.Mathematics import LanczosEigenSolver
from shogun.Mathematics import CGMShiftedFamilySolver
from shogun.Mathematics import LogRationalApproximationCGM
from shogun.Mathematics import ProbingSampler
from shogun.Mathematics import LogDetEstimator
from shogun.Mathematics import Statistics
from shogun.Library import SerialComputationEngine
# creating the linear operator, eigen-solver
op=RealSparseMatrixOperator(A.tocsc())
eig_solver=LanczosEigenSolver(op)
# we can set the iteration limit high for poorly conditioned matrices
eig_solver.set_max_iteration_limit(max_iter_eig)
# alternatively, if the matrix is small, we can compute eigenvalues externally
# and set min/max eigenvalues into the eigensolver
# from scipy.sparse.linalg import eigsh
# eigenvalues=eigsh(A, rows-1)
# eig_solver.set_min_eigenvalue(eigenvalues[0][0])
# eig_solver.set_max_eigenvalue(eigenvalues[0][-1])
# create the shifted-family linear solver which solves for all the shifts
# using as many matrix-vector products as one shift in CG iterations
lin_solver=CGMShiftedFamilySolver()
lin_solver.set_iteration_limit(max_iter_lin)
# computation engine
engine=SerialComputationEngine()
# set the desired accuracy tighter to obtain better results
# this determines the number of contour points in conformal mapping of
# the rational approximation of the Cauchy's integral of f(A)*s, f=log
desired_accuracy=1E-5
# creating the log-linear-operator function
op_func=LogRationalApproximationCGM(op, engine, eig_solver, lin_solver,\
desired_accuracy)
# set the trace sampler to be probing sampler, in which samples are obtained
# by greedy graph coloring of the power of sparse matrix (default is power=1,
# 2-distance coloring)
trace_sampler=ProbingSampler(op)
# estimating log-det
log_det_estimator=LogDetEstimator(trace_sampler, op_func, engine)
# set the number of samples as required
estimates=log_det_estimator.sample(num_samples)
estimated_logdet=sum(estimates)/len(estimates)
actual_logdet=Statistics.log_det(A)
print(actual_logdet, estimated_logdet)
return estimates
except ImportError:
print('One or many of the dependencies (Eigen3/LaPack/ColPack) not found!')
if __name__=='__main__':
print('LogDetEstimator')
mathematics_logdet (*parameter_list[0])
| gpl-3.0 |
fcolamar/AliPhysics | PWGJE/EMCALJetTasks/Tracks/analysis/base/TriggerEfficiency.py | 41 | 2551 | #**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
class TriggerEfficiency:
"""
Class calculating the trigger efficiency from a given min. bias container and a given triggered container
"""
def __init__(self, triggername, minbiascontainer, triggeredcontainer):
"""
Constructor
"""
self.__triggername = triggername
self.__minbiascontainer = minbiascontainer
self.__triggeredcontainer = triggeredcontainer
self.__triggerefficiency = None
self.__CalculateTriggerEfficiency()
def __MakeNormalisedSpectrum(self, container, name):
container.SetVertexRange(-10., 10.)
container.SetPileupRejection(True)
        if container.__class__.__name__ == "TrackContainer":
container.SelectTrackCuts(1)
container.RequestSeenInMinBias()
        return container.MakeProjection(0, "ptSpectrum%s" %(name), "p_{#rm{t}} (GeV/c)", "1/N_{event} 1/(#Delta p_{#rm t}) dN/dp_{#rm{t}} ((GeV/c)^{-2})", doNorm = False)
def __CalculateTriggerEfficiency(self):
minbiasspectrum = self.__MakeNormalisedSpectrum(self.__minbiascontainer, "minbias")
self.__triggerefficiency = self.__MakeNormalisedSpectrum(self.__triggeredcontainer, self.__triggername)
self.__triggerefficiency.Divide(self.__triggerefficiency, minbiasspectrum, 1., 1., "b")
self.__triggerefficiency.SetName("triggerEff%s" %(self.__triggername))
def GetEfficiencyCurve(self):
return self.__triggerefficiency
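    # The efficiency curve is simply the ratio histogram triggered / min. bias;
    # the "b" option passed to Divide() makes ROOT compute binomial errors.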
| bsd-3-clause |
kalev/anaconda | pyanaconda/iw/partition_gui.py | 2 | 72665 | #
# partition_gui.py: allows the user to choose how to partition their disks
#
# Copyright (C) 2001, 2002 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Matt Wilson <[email protected]>
# Michael Fulbright <[email protected]>
#
import os
import gobject
import gtk
import gtk.glade
try:
import gnomecanvas
except ImportError:
import gnome.canvas as gnomecanvas
import pango
from pyanaconda import gui
import parted
import string
import types
import copy
from decimal import Decimal
from pyanaconda import storage
from iw_gui import *
from pyanaconda.flags import flags
import datacombo
import lvm_dialog_gui as l_d_g
import raid_dialog_gui as r_d_g
import partition_dialog_gui as p_d_g
from pyanaconda.partIntfHelpers import *
from pyanaconda.constants import *
from partition_ui_helpers_gui import *
from pyanaconda.storage.partitioning import doPartitioning
from pyanaconda.storage.devicelibs import lvm
from pyanaconda.storage.devices import devicePathToName
from pyanaconda.storage.devices import PartitionDevice
from pyanaconda.storage.devices import BTRFSVolumeDevice
from pyanaconda.storage.devices import deviceNameToDiskByPath
from pyanaconda.storage.errors import DeviceNotFoundError
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
P_ = lambda x, y, z: gettext.ldngettext("anaconda", x, y, z)
import logging
log = logging.getLogger("anaconda")
STRIPE_HEIGHT = 35.0
LOGICAL_INSET = 3.0
TREE_SPACING = 2
# XXX hack but will work for now
if gtk.gdk.screen_width() > 640:
CANVAS_WIDTH = 490
else:
CANVAS_WIDTH = 390
CANVAS_HEIGHT = 200
MODE_ADD = 1
MODE_EDIT = 2
class Slice:
"""Class representing a slice of a stripe.
    parent -- the stripe that the slice belongs to.
text -- what will appear in the slice
type -- either SLICE or SUBSLICE
xoffset -- start percentage
xlength -- a length percentage
dcCB -- function that is called on a double click.
cCB -- function that is called when one click (selected)
sel_col -- color when selected
unsel_col -- color when unselected
obj -- some python object that is related to this slice.
selected -- initial state of slice.
"""
SLICE = 0
SUBSLICE = 1
CONTAINERSLICE = 2
def __init__(self, parent, text, type, xoffset, xlength, dcCB=lambda: None,
cCB=lambda x: None, sel_col="cornsilk1", unsel_col="white",
obj = None, selected = False):
self.text = text
self.type = type
self.xoffset = xoffset
self.xlength = xlength
self.parent = parent
self.dcCB = dcCB
self.cCB = cCB
self.sel_col = sel_col
self.unsel_col = unsel_col
self.obj = obj
self.selected = selected
def eventHandler(self, widget, event):
if event.type == gtk.gdk.BUTTON_PRESS:
if event.button == 1:
self.select()
self.cCB(self.obj)
elif event.type == gtk.gdk._2BUTTON_PRESS:
#self.select()
self.dcCB()
return True
def putOnCanvas(self):
pgroup = self.parent.getGroup()
self.group = pgroup.add(gnomecanvas.CanvasGroup)
self.box = self.group.add(gnomecanvas.CanvasRect)
self.group.connect("event", self.eventHandler)
canvas_text = self.group.add(gnomecanvas.CanvasText,
font="sans", size_points=8)
xoffset = self.xoffset * CANVAS_WIDTH
xlength = self.xlength * CANVAS_WIDTH
if self.type == Slice.SUBSLICE:
yoffset = 0.0 + LOGICAL_INSET
yheight = STRIPE_HEIGHT - (LOGICAL_INSET * 2)
texty = 0.0
else:
yoffset = 0.0
yheight = STRIPE_HEIGHT
texty = LOGICAL_INSET
if self.selected:
fill_color = self.sel_col
else:
fill_color = self.unsel_col
self.group.set(x=xoffset, y=yoffset)
self.box.set(x1=0.0, y1=0.0, x2=xlength,
y2=yheight, fill_color=fill_color,
outline_color='black', width_units=1.0)
canvas_text.set(x=2.0, y=texty + 2.0, text=self.text,
fill_color='black',
anchor=gtk.ANCHOR_NW, clip=True,
clip_width=xlength-1, clip_height=yheight-1)
def shutDown(self):
self.parent = None
if self.group:
self.group.destroy()
self.group = None
def select(self):
for slice in self.parent.slices:
slice.deselect()
self.selected = True
if self.group and self.box:
if self.type != Slice.CONTAINERSLICE:
self.group.raise_to_top()
self.box.set(outline_color="red")
self.box.set(fill_color=self.sel_col)
def deselect(self):
self.selected = False
if self.box:
self.box.set(outline_color="black", fill_color=self.unsel_col)
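# Rough usage sketch (values are made up): a partition covering the second
# quarter of a disk stripe would be created roughly as
#   Slice(stripe, "sda2", Slice.SLICE, xoffset=0.25, xlength=0.25,
#         dcCB=edit_cb, cCB=select_cb, obj=partition_device)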
class Stripe(object):
"""
canvas -- the canvas where everything goes
text -- the text that will appear on top of the stripe
    yoff -- the position on the y axis where this stripe should be drawn
dcCB -- function that should be called on a double click
obj -- some python object that is related to this stripe
"""
def __init__(self, canvas, text, dcCB, obj = None):
self.canvas_text = None
self.canvas = canvas
self.text = text
self.group = None
self._slices = []
self.dcCB = dcCB
self.selected = None
self.obj = obj
def putOnCanvas(self, yoff):
"""
Returns the y position after drawing this stripe.
"""
# We set the text for the stripe.
self.canvas_text = self.canvas.root().add(gnomecanvas.CanvasText,
x=0.0, y=yoff, font="sans", size_points=9)
self.canvas_text.set(text=self.text, fill_color='black',
anchor=gtk.ANCHOR_NW, weight=pango.WEIGHT_BOLD)
(xxx1, yyy1, xxx2, yyy2) = self.canvas_text.get_bounds()
textheight = yyy2 - yyy1 + 2
self.group = self.canvas.root().add(gnomecanvas.CanvasGroup,
x=0, y=yoff+textheight)
self.group.add(gnomecanvas.CanvasRect, x1=0.0, y1=0.0, x2=CANVAS_WIDTH,
y2=STRIPE_HEIGHT, fill_color='green',
outline_color='grey71', width_units=1.0)
self.group.lower_to_bottom()
# We paint all the container slices first. So the contained slices
# actually show up.
for slice in [s for s in self.slices if s.type == Slice.CONTAINERSLICE]:
slice.putOnCanvas()
# After painting the containers we paint the rest.
for slice in [s for s in self.slices if s.type != Slice.CONTAINERSLICE]:
slice.putOnCanvas()
# 10 is a separator space.
return yoff + STRIPE_HEIGHT + textheight + 10
def shutDown(self):
for slice in self.slices:
slice.shutDown()
self._slices = []
if self.canvas_text:
self.canvas_text.destroy()
if self.group:
self.group.destroy()
self.group = None
def getGroup(self):
return self.group
@property
def slices(self):
return self._slices
def addSlice(self, new_slice):
# check to see if they overlap.
for slice in self.slices:
# Container slices and subslices can overlap.
if new_slice.type+slice.type == Slice.CONTAINERSLICE+Slice.SUBSLICE:
continue
if new_slice.xoffset > slice.xoffset \
and new_slice.xoffset < slice.xoffset + slice.xlength:
# there is a collision; we cannot add.
return
self._slices.append(new_slice)
def getSelectedSlice(self):
for slice in self.slices:
if slice.selected:
return slice
return None
class StripeGraph:
""" This class will only handle one stripe."""
__canvas = None
def __init__(self):
self.stripe = None
self.next_ypos = 0.0
def __del__(self):
self.shutDown()
def shutDown(self):
if self.stripe:
self.stripe.shutDown()
self.stripe = None
self.next_ypos = 0.0
@classmethod
def getCanvas(cls):
if not StripeGraph.__canvas:
StripeGraph.__canvas = gnomecanvas.Canvas()
return StripeGraph.__canvas
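# Note: the canvas is created lazily and shared via the class attribute, so
# every StripeGraph subclass draws onto the same gnomecanvas.Canvas widget.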
def setDisplayed(self, obj):
# Check to see if we already have the correct obj displayed.
if self.getDisplayed() and self.getDisplayed().obj == obj:
return
if self.stripe:
self.stripe.shutDown()
self.stripe = self._createStripe(obj)
self.stripe.putOnCanvas(0)
# Trying to center the picture.
apply(self.getCanvas().set_scroll_region, self.getCanvas().root().get_bounds())
def getDisplayed(self):
return self.stripe
def selectSliceFromObj(self, obj):
"""Search for obj in the slices """
stripe = self.getDisplayed()
if not stripe:
return
for slice in stripe.slices:
# There is a part object in each slice.
if not slice.obj:
continue
if obj == slice.obj and not slice.selected:
slice.select()
break
def _createStripe(self, obj):
# This method needs to be overridden by subclasses.
pass
def getSelectedSlice(self):
return self.stripe.getSelectedSlice()
class DiskStripeGraph(StripeGraph):
"""Handles the creation of a bar view for the 'normal' devies.
storage -- the storage object
cCB -- call back function used when the user clicks on a slice. This function
is passed a device object when its executed.
dcCB -- call back function used when the user double clicks on a slice.
drive -- drive to display
"""
def __init__(self, storage, drive=None, cCB=lambda x:None, dcCB=lambda:None):
StripeGraph.__init__(self)
self.storage = storage
self.cCB = cCB
self.dcCB = dcCB
# Define the default colors per partition type.
self.part_type_colors = \
{"sel_logical": "cornsilk1", "unsel_logical": "white",
"sel_extended": "cornsilk1", "unsel_extended": "white",
"sel_normal": "cornsilk1", "unsel_normal": "white",
"sel_freespace": "grey88", "unsel_freespace": "grey88"}
if drive:
self.setDisplayed(drive)
def _createStripe(self, drive):
# Create the stripe
drivetext = _("Drive %(drive)s (%(size)-0.f MB) (Model: %(model)s)") \
% {'drive': drive.path,
'size': drive.size,
'model': drive.model}
stripe = Stripe(self.getCanvas(), drivetext, self.dcCB, obj = drive)
# Create the slices.
# Offsets and lengths are fractions of the whole disk: 0 < value < 1
for part in drive.format.partedDisk.getFreeSpacePartitions() \
+ [d for d in drive.format.partitions]:
if part.getSize(unit="MB") <= 1.0 or \
part.type & parted.PARTITION_METADATA:
continue
# Create the start and length for the slice.
xoffset = (Decimal(str(part.geometry.start))
/ Decimal(str(drive.partedDevice.length)))
xlength = (Decimal(str(part.geometry.length))
/ Decimal(str(drive.partedDevice.length)))
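# e.g. (illustrative numbers) a partition starting at sector 1048576 with
# length 2097152 on an 8388608-sector disk gives xoffset = 0.125 and
# xlength = 0.25 of the stripe width.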
if part.type & parted.PARTITION_LOGICAL:
if part.type & parted.PARTITION_FREESPACE:
name = _("Free")
unsel_col = self.part_type_colors["unsel_freespace"]
sel_col = self.part_type_colors["sel_freespace"]
else:
name = part.path
unsel_col = self.part_type_colors["unsel_logical"]
sel_col = self.part_type_colors["sel_logical"]
partstr = "%s\n%.0f MB" % (name, float(part.getSize()))
stype = Slice.SUBSLICE
elif part.type & parted.PARTITION_FREESPACE:
partstr = "%s\n%.0f MB" % (_("Free"), float(part.getSize()))
stype = Slice.SLICE
unsel_col = self.part_type_colors["unsel_freespace"]
sel_col = self.part_type_colors["sel_freespace"]
elif part.type & parted.PARTITION_EXTENDED:
partstr = ""
stype = Slice.CONTAINERSLICE
unsel_col = self.part_type_colors["unsel_extended"]
sel_col = self.part_type_colors["sel_extended"]
else:
partstr = "%s\n%.0f MB" % (part.path, float(part.getSize()))
stype = Slice.SLICE
unsel_col = self.part_type_colors["unsel_normal"]
sel_col = self.part_type_colors["sel_normal"]
# We need to use the self.storage objects, not the partedDisk ones.
# The free space has no storage object.
if part.type != parted.PARTITION_FREESPACE:
partName = devicePathToName(part.getDeviceNodeName())
o_part = self.storage.devicetree.getDeviceByName(partName)
else:
o_part = None
slice = Slice(stripe, partstr, stype, xoffset, xlength,
dcCB = self.dcCB, cCB = self.cCB, sel_col = sel_col,
unsel_col = unsel_col, obj = o_part)
stripe.addSlice(slice)
return stripe
class LVMStripeGraph(StripeGraph):
"""
storage -- the storage object
cCB -- callback function used when the user clicks on a slice. This function
is passed a device object when it is executed.
dcCB -- callback function used when the user double-clicks on a slice.
vg -- volume group to display
"""
def __init__(self, storage, vg=None, cCB=lambda x:None, dcCB=lambda:None):
StripeGraph.__init__(self)
self.storage = storage
self.cCB = cCB
self.dcCB = dcCB
# Define the default colors per partition type.
self.part_type_colors = \
{"sel_lv": "cornsilk1", "unsel_lv": "white",
"sel_freespace": "grey88", "unsel_freespace": "grey88"}
if vg:
self.setDisplayed(vg)
def _createStripe(self, vg):
# Create the stripe
vgtext = _("LVM Volume Group %(vgName)s (%(vgSize)-0.f MB)") % {"vgName": vg.name, "vgSize": vg.size}
stripe = Stripe(self.getCanvas(), vgtext, self.dcCB, obj = vg)
# Create the slices.
# Since we don't have a start and length like in the partitions, we
# put all the LVs next to each other and put the free space at the end.
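# e.g. (illustrative numbers) two 1000 MB LVs in a 4000 MB VG get
# xlength 0.25 each, leaving a 0.5-wide "Free" slice at the end.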
curr_offset = Decimal(0)
for lv in vg.lvs:
lvstr = "%s\n%.0f MB" % (lv.name, float(lv.size))
stype = Slice.SLICE
sel_col = self.part_type_colors["sel_lv"]
unsel_col = self.part_type_colors["unsel_lv"]
#xoffset = float(curr_offset) / float(vg.size)
xoffset = curr_offset
xlength = Decimal(str(lv.size)) / Decimal(str(vg.size))
slice = Slice(stripe, lvstr, stype, xoffset, xlength,
dcCB = self.dcCB, cCB = self.cCB, sel_col = sel_col,
unsel_col = unsel_col, obj = lv)
stripe.addSlice(slice)
curr_offset += xlength
# We add the free space if there is any space left.
if curr_offset < 1:
#freestr = _("Free")
stype = Slice.SLICE
sel_col = self.part_type_colors["sel_freespace"]
unsel_col = self.part_type_colors["unsel_freespace"]
xoffset = curr_offset
xlength = Decimal(1 - curr_offset)
# with the xlength we give an approximate size
freestr = "%s\n%.0f MB" % (_("Free"), Decimal(str(vg.size)) * xlength)
# We append no object.
slice = Slice(stripe, freestr, stype, xoffset, xlength,
dcCB = self.dcCB, cCB = self.cCB, sel_col = sel_col,
unsel_col = unsel_col)
stripe.addSlice(slice)
return stripe
class MDStripeGraph(StripeGraph):
desc = "MD"
"""
storage -- the storage object
cCB -- callback function used when the user clicks on a slice. This function
is passed a device object when it is executed.
dcCB -- callback function used when the user double-clicks on a slice.
md -- md device to display.
"""
def __init__(self, storage, device=None, cCB=lambda x:None, dcCB=lambda:None):
StripeGraph.__init__(self)
self.storage = storage
self.cCB = cCB
self.dcCB = dcCB
self.part_type_colors = \
{"sel_md": "cornsilk1", "unsel_md": "white"}
if device:
self.setDisplayed(device)
def _get_text(self, md):
return (_("%(desc)s %(mdPath)s (%(mdSize)-0.f MB)")
% {"mdPath": md.path, "mdSize": md.size, "desc": self.desc})
def _createStripe(self, md):
mdtext = self._get_text(md)
stripe = Stripe(self.getCanvas(), mdtext, self.dcCB, obj = md)
# Since we can't really create subslices with md devices we will only
# show the md device size in the bar.
mdstr = "%s\n%.0f MB" % (md.path, float(md.size))
stype = Slice.SLICE
sel_col = self.part_type_colors["sel_md"]
unsel_col = self.part_type_colors["unsel_md"]
xoffset = 0
xlength = 1
slice = Slice(stripe, mdstr, stype, xoffset, xlength,
dcCB = self.dcCB, cCB = self.cCB, sel_col = sel_col,
unsel_col = unsel_col, obj = md)
stripe.addSlice(slice)
return stripe
class MDRaidArrayStripeGraph(MDStripeGraph):
desc = "MD RAID Array"
class BTRFSStripeGraph(MDStripeGraph):
desc = "BTRFS Pool"
def _get_text(self, md):
return (_("%(desc)s %(mdUUID)s (%(mdSize)-0.f MB)")
% {"mdUUID": md.uuid, "mdSize": md.size, "desc": self.desc})
class MessageGraph:
def __init__(self, canvas, message):
self.canvas = canvas
self.message = message
self.canvas_text = None
def display(self):
if self.canvas_text != None:
# This means that it's already displayed.
return
self.canvas_text = self.canvas.root().add(gnomecanvas.CanvasText,
x=0.0, y=20, font="sans", size_points=16)
self.canvas_text.set(text=self.message, fill_color='black',
anchor=gtk.ANCHOR_CENTER, weight=pango.WEIGHT_BOLD)
# Trying to center the picture.
apply(self.canvas.set_scroll_region, self.canvas.root().get_bounds())
def destroy(self):
if self.canvas_text:
self.canvas_text.destroy()
self.canvas_text = None
class DiskTreeModelHelper:
def __init__(self, model, columns, iter):
self.model = model
self.iter = iter
self.columns = columns
def __getitem__(self, key):
if type(key) == types.StringType:
key = self.columns[key]
try:
return self.model.get_value(self.iter, key)
except Exception:
# FIXME: what exceptions might actually get raised here?
return None
def __setitem__(self, key, value):
if type(key) == types.StringType:
key = self.columns[key]
self.model.set_value(self.iter, key, value)
class DiskTreeModel(gtk.TreeStore):
isLeaf = -3
isFormattable = -2
# format: column header, type, x alignment, hide?, visibleKey
titles = ((N_("Device"), gobject.TYPE_STRING, 0.0, 0, 0),
(N_("Label"), gobject.TYPE_STRING, 0.0, 1, 0),
(N_("Size (MB)"), gobject.TYPE_STRING, 1.0, 0, 0),
(N_("Mount Point"), gobject.TYPE_STRING, 0.0, 0, isLeaf),
(N_("Type"), gobject.TYPE_STRING, 0.0, 0, 0),
(N_("Format"), gobject.TYPE_OBJECT, 0.5, 0, isFormattable),
("", gobject.TYPE_STRING, 0.0, 0, 0),
# IsLeaf and IsFormattable must stay third- and second-to-last: the
# visibleKey handling in __init__ looks them up as len(titles) + key
("IsLeaf", gobject.TYPE_BOOLEAN, 0.0, 1, 0),
("IsFormattable", gobject.TYPE_BOOLEAN, 0.0, 1, 0),
("PyObject", gobject.TYPE_PYOBJECT, 0.0, 1, 0))
def __init__(self):
self.hiddenPartitions = []
self.titleSlot = {}
i = 0
types = [self]
self.columns = []
for title, kind, alignment, hide, key in self.titles:
self.titleSlot[title] = i
types.append(kind)
if hide:
i += 1
continue
elif kind == gobject.TYPE_OBJECT:
renderer = gtk.CellRendererPixbuf()
propertyMapping = {'pixbuf': i}
elif kind == gobject.TYPE_BOOLEAN:
renderer = gtk.CellRendererToggle()
propertyMapping = {'active': i}
elif (kind == gobject.TYPE_STRING or
kind == gobject.TYPE_INT):
renderer = gtk.CellRendererText()
propertyMapping = {'markup': i}
# wire in the cells that we want only visible on leaf nodes to
# the special leaf node column.
if key < 0:
propertyMapping['visible'] = len(self.titles) + key
renderer.set_property('xalign', alignment)
if title == "Mount Point":
title = _("Mount Point/\nRAID/Volume")
elif title == "Size (MB)":
title = _("Size\n(MB)")
elif title != "":
title = _(title)
col = apply(gtk.TreeViewColumn, (title, renderer),
propertyMapping)
col.set_alignment(0.5)
if kind == gobject.TYPE_STRING or kind == gobject.TYPE_INT:
col.set_property('sizing', gtk.TREE_VIEW_COLUMN_AUTOSIZE)
self.columns.append(col)
i += 1
apply(gtk.TreeStore.__init__, types)
self.view = gtk.TreeView(self)
# append all of the columns
map(self.view.append_column, self.columns)
def getTreeView(self):
return self.view
def selectRowFromObj(self, obj, iter=None):
"""Find the row in the tree containing obj and select it.
obj -- the object that we are searching for
iter -- an iter from the tree. If None, get the first one.
Returns the iter where obj was found. None otherwise.
"""
retval = None
r_obj = None
#FIXME: watch out for hidden rows.
if not iter:
iter = self.get_iter_first()
while iter:
# r_obj -> (row object)
r_obj = self[iter]["PyObject"]
if obj and r_obj == obj:
# We have found our object; select this row and break.
selection = self.view.get_selection()
if selection is not None:
selection.unselect_all()
selection.select_iter(iter)
# Make sure the tree view shows what we have selected.
path = self.get_path(iter)
col = self.view.get_column(0)
self.view.set_cursor(path, col, False)
self.view.scroll_to_cell(path, col, True, 0.5, 0.5)
retval = iter
break
if self.iter_has_child(iter):
# Call recursively if row has children.
rv = self.selectRowFromObj(obj, iter=self.iter_children(iter))
if rv != None:
retval = rv
break
iter = self.iter_next(iter)
return retval
def getCurrentDevice(self):
""" Return the device representing the current selection,
None otherwise.
"""
selection = self.view.get_selection()
model, iter = selection.get_selected()
if not iter:
return None
return model[iter]['PyObject']
def getCurrentDeviceParent(self):
""" Return the parent of the selected row. Returns an iter.
None if there is no parent.
"""
selection = self.view.get_selection()
model, iter = selection.get_selected()
if not iter:
return None
return model.iter_parent(iter)
def resetSelection(self):
pass
def clear(self):
selection = self.view.get_selection()
if selection is not None:
selection.unselect_all()
gtk.TreeStore.clear(self)
def __getitem__(self, iter):
if type(iter) == gtk.TreeIter:
return DiskTreeModelHelper(self, self.titleSlot, iter)
raise KeyError, iter
class PartitionWindow(InstallWindow):
def __init__(self, ics):
InstallWindow.__init__(self, ics)
ics.setTitle(_("Partitioning"))
ics.setNextEnabled(True)
self.parent = ics.getICW().window
def quit(self):
pass
def presentPartitioningComments(self,title, labelstr1, labelstr2, comments,
type="ok", custom_buttons=None):
if flags.autostep:
return 1
win = gtk.Dialog(title)
gui.addFrame(win)
if type == "ok":
win.add_button('gtk-ok', 1)
defaultchoice = 0
elif type == "yesno":
win.add_button('gtk-no', 2)
win.add_button('gtk-yes', 1)
defaultchoice = 1
elif type == "continue":
win.add_button('gtk-cancel', 0)
win.add_button(_("Continue"), 1)
defaultchoice = 1
elif type == "custom":
rid=0
for button in custom_buttons:
widget = win.add_button(button, rid)
rid = rid + 1
defaultchoice = rid - 1
image = gtk.Image()
image.set_from_stock('gtk-dialog-warning', gtk.ICON_SIZE_DIALOG)
hbox = gtk.HBox(False, 9)
al=gtk.Alignment(0.0, 0.0)
al.add(image)
hbox.pack_start(al, False)
buffer = gtk.TextBuffer(None)
buffer.set_text(comments)
text = gtk.TextView()
text.set_buffer(buffer)
text.set_property("editable", False)
text.set_property("cursor_visible", False)
text.set_wrap_mode(gtk.WRAP_WORD)
sw = gtk.ScrolledWindow()
sw.add(text)
sw.set_size_request(400, 200)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_IN)
info1 = gtk.Label(labelstr1)
info1.set_line_wrap(True)
info1.set_size_request(400, -1)
info2 = gtk.Label(labelstr2)
info2.set_line_wrap(True)
info2.set_size_request(400, -1)
vbox = gtk.VBox(False, 9)
al=gtk.Alignment(0.0, 0.0)
al.add(info1)
vbox.pack_start(al, False)
vbox.pack_start(sw, True, True)
al=gtk.Alignment(0.0, 0.0)
al.add(info2)
vbox.pack_start(al, True)
hbox.pack_start(vbox, True, True)
win.vbox.pack_start(hbox)
win.set_position(gtk.WIN_POS_CENTER)
win.set_default_response(defaultchoice)
win.show_all()
rc = win.run()
win.destroy()
return rc
def getNext(self):
(errors, warnings) = self.storage.sanityCheck()
if errors:
labelstr1 = _("The partitioning scheme you requested "
"caused the following critical errors.")
labelstr2 = _("You must correct these errors before "
"you continue your installation of "
"%s.") % (productName,)
commentstr = string.join(errors, "\n\n")
self.presentPartitioningComments(_("Partitioning Errors"),
labelstr1, labelstr2,
commentstr, type="ok")
raise gui.StayOnScreen
if warnings:
# "storage configuration"
labelstr1 = _("The partitioning scheme you requested "
"generated the following warnings.")
labelstr2 = _("Would you like to continue with "
"your requested partitioning "
"scheme?")
commentstr = string.join(warnings, "\n\n")
rc = self.presentPartitioningComments(_("Partitioning Warnings"),
labelstr1, labelstr2,
commentstr,
type="yesno")
if rc != 1:
raise gui.StayOnScreen
formatWarnings = getPreExistFormatWarnings(self.storage)
if formatWarnings:
labelstr1 = _("The following pre-existing devices have been "
"selected to be formatted, destroying all data.")
# labelstr2 = _("Select 'Yes' to continue and format these "
# "partitions, or 'No' to go back and change these "
# "settings.")
labelstr2 = ""
commentstr = ""
for (dev, type, mntpt) in formatWarnings:
commentstr = commentstr + \
"%s %s %s\n" % (dev,type,mntpt)
rc = self.presentPartitioningComments(_("Format Warnings"),
labelstr1, labelstr2,
commentstr,
type="custom",
custom_buttons=["gtk-cancel",
_("_Format")])
if rc != 1:
raise gui.StayOnScreen
self.stripeGraph.shutDown()
self.tree.clear()
del self.parent
return None
def getPrev(self):
self.stripeGraph.shutDown()
self.tree.clear()
del self.parent
return None
def addDevice(self, device, treeiter):
if device.format.hidden:
return
if device.format.type == "luks":
# we'll want to grab format info from the mapped
# device, not the encrypted one
try:
dm_dev = self.storage.devicetree.getChildren(device)[0]
except IndexError:
format = device.format
else:
format = dm_dev.format
else:
format = device.format
# icon for the format column
if device.format.type == "luks" and not device.format.exists:
# we're creating the LUKS header
format_icon = self.lock_pixbuf
elif not format.exists:
# we're creating a format on the device
format_icon = self.checkmark_pixbuf
else:
format_icon = None
# mount point string
if format.type == "lvmpv":
vg = None
for _vg in self.storage.vgs:
if _vg.dependsOn(device):
vg = _vg
break
mnt_str = getattr(vg, "name", "")
elif format.type == "mdmember":
array = None
for _array in self.storage.mdarrays:
if _array.dependsOn(device):
array = _array
break
mnt_str = getattr(array, "name", "")
elif format.type == "btrfs" and not isinstance(device, BTRFSVolumeDevice):
btrfs_dev = self.storage.devicetree.getChildren(device)[0]
mnt_str = btrfs_dev.name
else:
mnt_str = getattr(format, "mountpoint", "")
if mnt_str is None:
mnt_str = ""
isleaf = True
# device name
name_str = getattr(device, "lvname", device.name)
# label
label_str = getattr(format, "label", "")
if label_str is None:
label_str = ""
self.tree[treeiter]['Device'] = name_str
self.tree[treeiter]['Size (MB)'] = "%Ld" % device.size
self.tree[treeiter]['PyObject'] = device
self.tree[treeiter]['IsFormattable'] = format.formattable
self.tree[treeiter]['Format'] = format_icon
self.tree[treeiter]['Mount Point'] = mnt_str
self.tree[treeiter]['IsLeaf'] = isleaf
self.tree[treeiter]['Type'] = format.name
self.tree[treeiter]['Label'] = label_str
# XXX can this move up one level?
if isinstance(device, BTRFSVolumeDevice):
# list subvolumes as children of the main volume
for s in device.subvolumes:
log.debug("%r" % s.format)
isleaf = False
if s.format.exists:
sub_format_icon = None
else:
sub_format_icon = self.checkmark_pixbuf
subvol_iter = self.tree.append(treeiter)
self.tree[subvol_iter]['Device'] = s.name
self.tree[subvol_iter]['PyObject'] = s
self.tree[subvol_iter]['IsFormattable'] = True
self.tree[subvol_iter]['Format'] = sub_format_icon
self.tree[subvol_iter]['Mount Point'] = s.format.mountpoint
self.tree[subvol_iter]['Type'] = s.type
self.tree[subvol_iter]['IsLeaf'] = True
def populate(self, initial = 0):
self.tree.resetSelection()
# first do LVM
vgs = self.storage.vgs
if vgs:
lvmparent = self.tree.append(None)
self.tree[lvmparent]['Device'] = _("LVM Volume Groups")
for vg in vgs:
vgparent = self.tree.append(lvmparent)
self.addDevice(vg, vgparent)
self.tree[vgparent]['Type'] = ""
for lv in vg.lvs:
iter = self.tree.append(vgparent)
self.addDevice(lv, iter)
# We add a row for the VG free space.
if vg.freeSpace > 0:
iter = self.tree.append(vgparent)
self.tree[iter]['Device'] = _("Free")
self.tree[iter]['Size (MB)'] = str(vg.freeSpace)
self.tree[iter]['PyObject'] = None
self.tree[iter]['Mount Point'] = ""
self.tree[iter]['IsLeaf'] = True
# handle RAID next
mdarrays = self.storage.mdarrays
if mdarrays:
raidparent = self.tree.append(None)
self.tree[raidparent]['Device'] = _("RAID Devices")
for array in mdarrays:
iter = self.tree.append(raidparent)
self.addDevice(array, iter)
name = "%s <span size=\"small\" color=\"gray\">(%s)</span>" % \
(array.name, array.path)
self.tree[iter]['Device'] = name
# BTRFS volumes
btrfs_devs = self.storage.btrfsVolumes
if btrfs_devs:
btrfsparent = self.tree.append(None)
self.tree[btrfsparent]['Device'] = _("BTRFS Volumes")
for dev in btrfs_devs:
iter = self.tree.append(btrfsparent)
self.addDevice(dev, iter)
# now normal partitions
disks = self.storage.partitioned
# also include unpartitioned disks that aren't mpath or biosraid
whole = filter(lambda d: not d.partitioned and not d.format.hidden,
self.storage.disks)
disks.extend(whole)
disks.sort(key=lambda d: d.name)
drvparent = self.tree.append(None)
self.tree[drvparent]['Device'] = _("Hard Drives")
for disk in disks:
# add a parent node to the tree
parent = self.tree.append(drvparent)
self.tree[parent]['PyObject'] = disk
if disk.partitioned:
part = disk.format.firstPartition
extendedParent = None
while part:
if part.type & parted.PARTITION_METADATA:
part = part.nextPartition()
continue
partName = devicePathToName(part.getDeviceNodeName())
device = self.storage.devicetree.getDeviceByName(partName)
if not device and not part.type & parted.PARTITION_FREESPACE:
log.debug("can't find partition %s in device"
" tree" % partName)
# ignore any free space region that is less than the
# grain size of the disklabel alignment we are using
if part.type & parted.PARTITION_FREESPACE:
min_length = disk.format.alignment.grainSize
if part.type & parted.PARTITION_LOGICAL:
# ignored free regions in the extended can be up
# to twice the alignment grain size, to account
# for logical partition metadata
min_length *= 2
if part.geometry.length < min_length:
part = part.nextPartition()
continue
if device and device.isExtended:
if extendedParent:
raise RuntimeError, ("can't handle more than "
"one extended partition per disk")
extendedParent = self.tree.append(parent)
iter = extendedParent
elif part.type & parted.PARTITION_LOGICAL:
if not extendedParent:
raise RuntimeError, ("crossed logical partition "
"before extended")
iter = self.tree.append(extendedParent)
else:
iter = self.tree.append(parent)
if device and not device.isExtended:
self.addDevice(device, iter)
else:
# either extended or freespace
if part.type & parted.PARTITION_FREESPACE:
devstring = _("Free")
ptype = ""
else:
devstring = partName
ptype = _("Extended")
self.tree[iter]['Device'] = devstring
self.tree[iter]['Type'] = ptype
size = part.getSize(unit="MB")
if size < 1.0:
sizestr = "< 1"
else:
sizestr = "%Ld" % (size)
self.tree[iter]['Size (MB)'] = sizestr
self.tree[iter]['PyObject'] = device
part = part.nextPartition()
else:
# whole-disk formatting
self.addDevice(disk, parent)
ident = None
try:
if disk.type == "dasd" or disk.type == "zfcp":
ident = deviceNameToDiskByPath(disk.name)
if ident.startswith("/dev/disk/by-path/"):
ident = os.path.basename(ident)
elif disk.type == "dm-multipath":
ident = disk.wwid
except DeviceNotFoundError:
ident = None
if not ident:
ident = disk.path
# Insert a '\n' when device string is too long. Usually when it
# contains '/dev/mapper'. First column should be around 20 chars.
if len(disk.name) + len(ident) > 20:
separator = "\n"
else:
separator= " "
self.tree[parent]['Device'] = \
"%s%s<span size=\"small\" color=\"gray\">(%s)</span>" \
% (disk.name, separator, ident)
self.treeView.expand_all()
self.messageGraph.display()
def barviewActivateCB(self):
""" Should be called when we double click on a slice"""
# This is a bit of a hack to make the double click on free space work.
# This function is useful when the selected slice is a free space,
# in any other case it calls self.treeActiveCB.
# We first see if the double click was from a free space or from another
# slice.
sel_slice = self.stripeGraph.getSelectedSlice()
if sel_slice == None:
# This really should not happen. Do nothing.
return
# The selected slice is a free slice if the object contained in it is
# None.
if sel_slice.obj != None:
# This is not a free slice, we should call treeActivateCB
return self.treeActivateCB()
else:
# Display a create window according to the stripe object.
# Get the device from the stripe.obj
disp_stripe = self.stripeGraph.getDisplayed()
if disp_stripe == None:
# this should not happen
return
# Display a create dialog.
stripe_dev = disp_stripe.obj
if stripe_dev.partitioned:
tempformat = self.storage.defaultFSType
device = self.storage.newPartition(fmt_type=tempformat)
self.editPartition(device, isNew = True)
elif isinstance(stripe_dev, storage.LVMVolumeGroupDevice):
self.editLVMLogicalVolume(vg = stripe_dev)
return
def treeActivateCB(self, *args):
curr_dev = self.tree.getCurrentDevice()
if isinstance(curr_dev, storage.PartitionDevice) and \
not curr_dev.isExtended:
self.editCB()
elif isinstance(curr_dev, storage.LVMLogicalVolumeDevice) \
or isinstance(curr_dev, storage.LVMVolumeGroupDevice) \
or isinstance(curr_dev, storage.MDRaidArrayDevice):
self.editCB()
elif curr_dev == None:
# It's probably free space
iparent = self.tree.getCurrentDeviceParent()
if iparent == None:
# it was not free space, it is a root row.
return
# We execute a create function given the type of parent that was
# found.
# FIXME: This code might repeat itself. It might be a good idea to
# put it in a function.
curr_parent = self.tree[iparent]["PyObject"]
if curr_parent.partitioned:
tempformat = self.storage.defaultFSType
device = self.storage.newPartition(fmt_type=tempformat)
self.editPartition(device, isNew = True)
elif isinstance(curr_parent, storage.LVMVolumeGroupDevice):
self.editLVMLogicalVolume(vg = curr_parent)
return
def treeSelectCB(self, selection, *args):
# The edit and delete buttons will be enabled if the user has chosen
# something editable and/or deletable.
self.deleteButton.set_sensitive(False)
self.editButton.set_sensitive(False)
# I have no idea why this iter might be None. It's best to return
# without any action.
model, iter = selection.get_selected()
if not iter:
return
# If we return because there is no parent, make sure we show the user
# the message graph and no stripe graph. The 'edit' and 'delete' buttons
# will be deactivated.
iparent = model.iter_parent(iter)
if not iparent:
self.stripeGraph.shutDown()
self.messageGraph.display()
return # This is a root row.
# We destroy the message first. We will make sure to repaint it later
# if no stripe is displayed. Can't destroy it at the end of this func
# because it uncenters the created stripe, if any.
self.messageGraph.destroy()
device = model[iter]['PyObject']
# See if we need to change what is in the canvas. In every case
# we must make sure we have the correct StripeGraph class.
if not device:
# This is free space.
parent = self.tree[iparent]["PyObject"]
if parent.partitioned:
if not isinstance(self.stripeGraph, DiskStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = DiskStripeGraph(self.storage,
drive = parent, cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(parent)
elif isinstance(parent, storage.LVMVolumeGroupDevice):
if not isinstance(self.stripeGraph, LVMStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = LVMStripeGraph(self.storage,
vg = parent, cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(parent)
elif device.partitioned:
if not isinstance(self.stripeGraph, DiskStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = DiskStripeGraph(self.storage,
drive = device,
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device)
# this is deletable but not editable.
self.deleteButton.set_sensitive(True)
elif isinstance(device, storage.PartitionDevice):
if not isinstance(self.stripeGraph, DiskStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = DiskStripeGraph(self.storage,
drive = device.parents[0],
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device.parents[0])
self.stripeGraph.selectSliceFromObj(device)
self.deleteButton.set_sensitive(True)
if not device.isExtended:
self.editButton.set_sensitive(True)
elif isinstance(device, storage.LVMVolumeGroupDevice):
if not isinstance(self.stripeGraph, LVMStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = LVMStripeGraph(self.storage, vg = device,
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device)
self.deleteButton.set_sensitive(True)
self.editButton.set_sensitive(True)
elif isinstance(device, storage.LVMLogicalVolumeDevice):
if not isinstance(self.stripeGraph, LVMStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = LVMStripeGraph(self.storage, vg = device.vg,
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device.vg)
self.stripeGraph.selectSliceFromObj(device)
self.deleteButton.set_sensitive(True)
self.editButton.set_sensitive(True)
elif isinstance(device, storage.MDRaidArrayDevice):
if not isinstance(self.stripeGraph, MDRaidArrayStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = MDRaidArrayStripeGraph(self.storage,
device = device,
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device)
self.deleteButton.set_sensitive(True)
self.editButton.set_sensitive(True)
elif isinstance(device, storage.BTRFSDevice):
# BTRFSDevice can be edited but not explicitly deleted. It is
# deleted when its last member device is removed.
if not isinstance(self.stripeGraph, BTRFSStripeGraph):
self.stripeGraph.shutDown()
self.stripeGraph = BTRFSStripeGraph(self.storage,
device = device,
cCB = self.tree.selectRowFromObj,
dcCB = self.barviewActivateCB)
self.stripeGraph.setDisplayed(device)
self.deleteButton.set_sensitive(False)
self.editButton.set_sensitive(True)
else:
# This means that the user selected something that is not showable
# in the bar view. Just show the information message.
self.stripeGraph.shutDown()
self.messageGraph.display()
self.deleteButton.set_sensitive(False)
self.editButton.set_sensitive(False)
def deleteCB(self, widget):
""" Right now we can say that if the device is partitioned we
want to delete all of the devices it contains. At some point
we will want to support creation and removal of partitionable
devices. This will need some work when that time comes.
"""
device = self.tree.getCurrentDevice()
if device.partitioned:
if doClearPartitionedDevice(self.intf,
self.storage,
device):
self.refresh()
elif doDeleteDevice(self.intf,
self.storage,
device):
if isinstance(device, storage.devices.PartitionDevice):
justRedraw = False
else:
justRedraw = True
if device.type == "lvmlv" and device in device.vg.lvs:
device.vg._removeLogVol(device)
self.refresh(justRedraw=justRedraw)
def createCB(self, *args):
# First we must decide what parts of the create_storage_dialog
# we will activate.
activate_create_partition = True
# We activate the create Volume Group radio button if there is a free
# partition with a Physical Volume format.
activate_create_vg = False
availpvs = len(self.storage.unusedPVs())
if (lvm.has_lvm()
and getFormat("lvmpv").supported
and availpvs > 0):
activate_create_vg = True
# We activate the create RAID dev if there are partitions that have
# raid format and are not related to any raid dev.
activate_create_raid_dev = False
availraidparts = len(self.storage.unusedMDMembers())
availminors = self.storage.unusedMDMinors
if (len(availminors) > 0
and getFormat("software RAID").supported
and availraidparts > 1):
activate_create_raid_dev = True
# Must check if all the possibilities are False. In that case, tell the
# user that nothing can be created and why.
if (not activate_create_partition
and not activate_create_vg
and not activate_create_raid_dev):
self.intf.messageWindow(_("Cannot perform any creation action"),
_("Note that the creation action requires one of the "
"following:\n\n"
"* Free space in one of the Hard Drives.\n"
"* At least two free Software RAID partitions.\n"
"* At least one free physical volume (LVM) partition.\n"
"* At least one Volume Group with free space."),
custom_icon="warning")
return
# We will activate the create lv button when we have a VG to put the
# LVs on.
activate_create_lv = False
vgs_with_free_space = []
for vg in self.storage.vgs:
if vg.freeSpace > 0:
vgs_with_free_space.append(vg)
if len(vgs_with_free_space) > 0:
activate_create_lv = True
# GTK crap starts here.
create_storage_xml = gtk.glade.XML(
gui.findGladeFile("create-storage.glade"), domain="anaconda")
self.dialog = create_storage_xml.get_widget("create_storage_dialog")
# Activate the partition radio buttons if needed.
# sp_rb -> standard partition
sp_rb = create_storage_xml.get_widget("create_storage_rb_standard_part")
# lp_rb -> lvm partition (physical volume)
lp_rb = create_storage_xml.get_widget("create_storage_rb_lvm_part")
# rp_rb -> RAID partition
rp_rb = create_storage_xml.get_widget("create_storage_rb_raid_part")
if activate_create_partition:
sp_rb.set_sensitive(True)
lp_rb.set_sensitive(True)
rp_rb.set_sensitive(True)
# Activate the Volume Group radio buttons if needed.
# vg_rb -> Volume Group
vg_rb = create_storage_xml.get_widget("create_storage_rb_lvm_vg")
if activate_create_vg:
vg_rb.set_sensitive(True)
# Activate the Logical Volume radio button if needed.
# We must also take care to control the combo box.
lv_rb = create_storage_xml.get_widget("create_storage_rb_lvm_lv")
if activate_create_lv:
# The combobox is shown next to the radio button and will only be
# sensitive while the radio button is active.
def toggle_vg_cb_CB(button, vg_cb, selected_dev):
if button.get_active():
vg_cb.set_sensitive(True)
# We set the VG to whatever the user has chosen in the tree
# view. We will fall back on the first item on the list if
# there is no chosen VG.
if selected_dev and selected_dev.name \
and vg_cb.set_active_text(selected_dev.name):
# if set_active is True, we don't need to do anything else
pass
else:
vg_cb.set_active_text(vgs_with_free_space[0].name)
else:
vg_cb.set_sensitive(False)
vg_cb_st = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_PYOBJECT)
vg_cb = datacombo.DataComboBox(store = vg_cb_st)
vg_cb.set_sensitive(False)
for vg in vgs_with_free_space:
# FIXME: the name length might be a problem.
vg_cb.append(vg.name, vg)
lv_hb = create_storage_xml.get_widget("create_storage_hb_lvm_lv")
lv_hb.pack_start(vg_cb)
lv_rb.set_sensitive(True)
selected_dev = self.tree.getCurrentDevice()
lv_rb.connect("toggled", toggle_vg_cb_CB, vg_cb, selected_dev)
# Activate the RAID dev if needed.
# rd_rb -> RAID device
rd_rb = create_storage_xml.get_widget("create_storage_rb_raid_dev")
if activate_create_raid_dev:
rd_rb.set_sensitive(True)
# Before drawing let's select the first radio button that is sensitive:
# How can I get sensitivity from gtk.radiobutton?
if activate_create_partition:
sp_rb.set_active(True)
sp_rb.grab_focus()
elif activate_create_vg:
vg_rb.set_active(True)
vg_rb.grab_focus()
elif activate_create_raid_dev:
rd_rb.set_active(True)
rd_rb.grab_focus()
gui.addFrame(self.dialog)
self.dialog.show_all()
# Let's wire up the information messages with callbacks
# The RAID info message
rinfo_button = create_storage_xml.get_widget("create_storage_info_raid")
whatis_r = _("Software RAID allows you to combine several disks into "
"a larger RAID device. A RAID device can be configured "
"to provide additional speed and reliability compared "
"to using an individual drive. For more information on "
"using RAID devices please consult the %s "
"documentation.\n") % (productName,)
whatneed_r = _("To use RAID you must first create at least two "
"partitions of type 'software RAID'. Then you can create a "
"RAID device that can be formatted and mounted.\n\n")
whathave_r = P_(
"You currently have %d software RAID partition free to use.",
"You currently have %d software RAID partitions free to use.",
availraidparts) % (availraidparts,)
rinfo_message = "%s\n%s%s" % (whatis_r, whatneed_r, whathave_r)
rinfo_cb = lambda x : self.intf.messageWindow(_("About RAID"),
rinfo_message, custom_icon="information")
rinfo_button.connect("clicked", rinfo_cb)
# The LVM info message
lvminfo_button = create_storage_xml.get_widget("create_storage_info_lvm")
whatis_lvm = _("Logical Volume Manager (LVM) is a 3 level construct. "
"The first level is made up of disks or partitions formatted with "
"LVM metadata called Physical Volumes (PV). A Volume Group "
"(VG) sits on top of one or more PVs. The VG, in turn, is the "
"base to create one or more Logical Volumes (LV). Note that a "
"VG can be an aggregate of PVs from multiple physical disks. For "
"more information on using LVM please consult the %s "
"documentation\n") % (productName, )
whatneed_lvm = _("To create a PV you need a partition with "
"free space. To create a VG you need a PV that is not "
"part of any existing VG. To create an LV you need a VG with "
"free space.\n\n")
whathave_lvm = P_("You currently have %d available PV free to use.\n",
"You currently have %d available PVs free to use.\n",
availpvs) % (availpvs, )
lvminfo_message = "%s\n%s%s" % (whatis_lvm, whatneed_lvm, whathave_lvm)
lvminfo_cb = lambda x : self.intf.messageWindow(_("About LVM"),
lvminfo_message, custom_icon="information")
lvminfo_button.connect("clicked", lvminfo_cb)
dialog_rc = self.dialog.run()
# If Cancel was pressed
if dialog_rc == 0:
self.dialog.destroy()
return
# If Create was pressed, make sure we call dialog.destroy() before
# calling any other screen. We don't want the create dialog to show
# in the back when we pop up other screens.
if dialog_rc != 1:
log.error("I received a dialog_rc != 1 (%d) witch should not "
"happen" % dialog_rc)
self.dialog.destroy()
return
self.dialog.destroy()
if rp_rb.get_active():
member = self.storage.newPartition(fmt_type="mdmember")
self.editPartition(member, isNew = True, restrictfs=["mdmember"])
return
elif rd_rb.get_active():
array = self.storage.newMDArray(fmt_type=self.storage.defaultFSType)
self.editRaidArray(array, isNew = True)
return
elif lp_rb.get_active():
member = self.storage.newPartition(fmt_type="lvmpv")
self.editPartition(member, isNew = True, restrictfs=["lvmpv"])
return
elif vg_rb.get_active():
tempvg = self.storage.newVG()
self.editLVMVolumeGroup(tempvg, isNew = True)
return
elif lv_rb.get_active():
selected_vg = vg_cb.get_active_value()
self.editLVMLogicalVolume(vg = selected_vg)
return
elif sp_rb.get_active():
tempformat = self.storage.defaultFSType
device = self.storage.newPartition(fmt_type=tempformat)
self.editPartition(device, isNew = True)
return
def resetCB(self, *args):
if not confirmResetPartitionState(self.intf):
return
self.stripeGraph.shutDown()
# temporarily unset storage.config.clearPartType so that all devices
# will be found during storage reset
clearPartType = self.storage.config.clearPartType
self.storage.config.clearPartType = None
self.storage.reset()
self.storage.config.clearPartType = clearPartType
self.tree.clear()
self.populate()
def refresh(self, justRedraw=None):
log.debug("refresh: justRedraw=%s" % justRedraw)
self.stripeGraph.shutDown()
self.tree.clear()
if justRedraw:
rc = 0
else:
try:
doPartitioning(self.storage)
rc = 0
except PartitioningError as msg:
self.intf.messageWindow(_("Error Partitioning"),
_("Could not allocate requested partitions: %s.") % (msg),
custom_icon="error")
rc = -1
except PartitioningWarning as msg:
# XXX somebody other than me should make this look better
# XXX this doesn't handle the 'delete /boot partition spec' case
# (it says 'add anyway')
dialog = gtk.MessageDialog(self.parent, 0, gtk.MESSAGE_WARNING,
gtk.BUTTONS_NONE,
_("Warning: %s.") % (msg))
gui.addFrame(dialog)
button = gtk.Button(_("_Modify Partition"))
dialog.add_action_widget(button, 1)
button = gtk.Button(_("_Continue"))
dialog.add_action_widget(button, 2)
dialog.set_position(gtk.WIN_POS_CENTER)
dialog.show_all()
rc = dialog.run()
dialog.destroy()
if rc == 1:
rc = -1
else:
rc = 0
all_devices = self.storage.devicetree.devices
bootDevs = [d for d in all_devices if d.bootable]
#if reqs:
# for req in reqs:
# req.ignoreBootConstraints = 1
if not rc == -1:
self.populate()
return rc
def editCB(self, *args):
device = self.tree.getCurrentDevice()
reason = self.storage.deviceImmutable(device, ignoreProtected=True)
if reason:
self.intf.messageWindow(_("Unable To Edit"),
_("You cannot edit this device:\n\n%s")
% reason,
custom_icon="error")
return
if device.type == "mdarray":
self.editRaidArray(device)
elif device.type == "lvmvg":
self.editLVMVolumeGroup(device)
elif device.type == "lvmlv":
self.editLVMLogicalVolume(lv = device)
elif isinstance(device, storage.devices.PartitionDevice):
self.editPartition(device)
# isNew implies that this request has never been successfully used before
def editRaidArray(self, raiddev, isNew = False):
# r_d_g -> raid_dialog_gui
raideditor = r_d_g.RaidEditor(self.storage, self.intf, self.parent,
raiddev, isNew)
while True:
actions = raideditor.run()
for action in actions:
# FIXME: this needs to handle exceptions
self.storage.devicetree.registerAction(action)
if self.refresh(justRedraw=True):
actions.reverse()
for action in actions:
self.storage.devicetree.cancelAction(action)
if self.refresh():
raise RuntimeError, ("Returning partitions to state "
"prior to RAID edit failed")
continue
else:
break
raideditor.destroy()
def editPartition(self, device, isNew = False, restrictfs = None):
# p_d_g -> partition_dialog_gui
parteditor = p_d_g.PartitionEditor(self.anaconda, self.parent, device,
isNew = isNew, restrictfs = restrictfs)
while True:
orig_device = copy.copy(device)
actions = parteditor.run()
for action in actions:
# XXX we should handle exceptions here
self.anaconda.storage.devicetree.registerAction(action)
if self.refresh(justRedraw=not actions):
# autopart failed -- cancel the actions and try to get
# back to previous state
actions.reverse()
for action in actions:
self.anaconda.storage.devicetree.cancelAction(action)
# FIXME: proper action/device management would be better
if not isNew:
device.req_size = orig_device.req_size
device.req_base_size = orig_device.req_base_size
device.req_grow = orig_device.req_grow
device.req_max_size = orig_device.req_max_size
device.req_primary = orig_device.req_primary
device.req_disks = orig_device.req_disks
if self.refresh():
# this worked before and doesn't now...
raise RuntimeError, ("Returning partitions to state "
"prior to edit failed")
else:
break
parteditor.destroy()
return 1
def editLVMVolumeGroup(self, device, isNew = False):
# l_d_g -> lvm_dialog_gui
vgeditor = l_d_g.VolumeGroupEditor(self.anaconda, self.intf, self.parent,
device, isNew)
while True:
actions = vgeditor.run()
for action in actions:
# FIXME: handle exceptions
self.storage.devicetree.registerAction(action)
if self.refresh(justRedraw=True):
actions.reverse()
for action in actions:
self.storage.devicetree.cancelAction(action)
if self.refresh():
raise RuntimeError, ("Returning partitions to state "
"prior to edit failed")
continue
else:
break
vgeditor.destroy()
def editLVMLogicalVolume (self, lv = None, vg = None):
"""Will be consistent with the state of things and use this funciton
for creating and editing LVs.
lv -- the logical volume to edit. If this is set there is no need
for the other two arguments.
vg -- the volume group where the new lv is going to be created. This
will only be relevant when we are createing an LV.
"""
if lv != None:
# l_d_g -> lvm_dialog_gui
vgeditor = l_d_g.VolumeGroupEditor(self.anaconda, self.intf, self.parent,
lv.vg, isNew = False)
lv = vgeditor.lvs[lv.lvname]
isNew = False
elif vg != None:
# l_d_g -> lvm_dialog_gui
vgeditor = l_d_g.VolumeGroupEditor(self.anaconda, self.intf, self.parent,
vg, isNew = False)
tempvg = vgeditor.getTempVG()
name = self.storage.suggestDeviceName(parent=tempvg, prefix="lv")
format = getFormat(self.storage.defaultFSType)
vgeditor.lvs[name] = {'name': name,
'size': vg.freeSpace,
'format': format,
'originalFormat': format,
'stripes': 1,
'logSize': 0,
'snapshotSpace': 0,
'exists': False}
lv = vgeditor.lvs[name]
isNew = True
else:
# This is nonsense; nothing to do.
return
while True:
vgeditor.editLogicalVolume(lv, isNew = isNew)
actions = vgeditor.convertToActions()
for action in actions:
# FIXME: handle exceptions
self.storage.devicetree.registerAction(action)
if self.refresh(justRedraw=True):
actions.reverse()
for action in actions:
self.storage.devicetree.cancelAction(action)
if self.refresh():
raise RuntimeError, ("Returning partitions to state "
"prior to edit failed")
continue
else:
break
vgeditor.destroy()
def getScreen(self, anaconda):
self.anaconda = anaconda
self.storage = anaconda.storage
self.intf = anaconda.intf
self.checkmark_pixbuf = gui.getPixbuf("checkMark.png")
self.lock_pixbuf = gui.getPixbuf("gnome-lock.png")
checkForSwapNoMatch(anaconda)
# Beginning of the GTK stuff.
# create the operational buttons
buttonBox = gtk.HButtonBox()
buttonBox.set_spacing(6)
buttonBox.set_layout(gtk.BUTTONBOX_END)
ops = ((_("_Create"), self.createCB),
(_("_Edit"), self.editCB),
(_("_Delete"), self.deleteCB),
(_("Re_set"), self.resetCB))
for label, cb in ops:
button = gtk.Button(label)
buttonBox.add (button)
button.connect ("clicked", cb)
# We need these to control their sensitivity.
if label == _("_Edit"):
self.editButton = button
self.editButton.set_sensitive(False)
elif label == _("_Delete"):
self.deleteButton = button
self.deleteButton.set_sensitive(False)
# Create the disk tree (Fills the tree and the Bar View)
self.tree = DiskTreeModel()
self.treeView = self.tree.getTreeView()
self.treeView.connect('row-activated', self.treeActivateCB)
self.treeViewSelection = self.treeView.get_selection()
self.treeViewSelection.connect("changed", self.treeSelectCB)
self.stripeGraph = StripeGraph()
self.messageGraph = MessageGraph(self.stripeGraph.getCanvas(),
_("Please Select A Device"))
self.populate(initial = 1)
# Create the top scroll window
# We don't actually need a *scroll* window but nothing else worked.
hadj = gtk.Adjustment(step_incr = 5.0)
vadj = gtk.Adjustment(step_incr = 5.0)
swt = gtk.ScrolledWindow(hadjustment = hadj, vadjustment = vadj)
swt.add(self.stripeGraph.getCanvas())
swt.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
swt.set_shadow_type(gtk.SHADOW_IN)
# Create the bottom scroll window
swb = gtk.ScrolledWindow()
swb.add(self.treeView)
swb.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
swb.set_shadow_type(gtk.SHADOW_IN)
# Create main vertical box and add everything.
MVbox = gtk.VBox(False, 5)
MVbox.pack_start(swt, False, False)
MVbox.pack_start(swb, True)
MVbox.pack_start(buttonBox, False, False)
MVbox.pack_start(gtk.HSeparator(), False)
return MVbox
| gpl-2.0 |
alikins/ansible | lib/ansible/modules/cloud/amazon/efs.py | 14 | 24211 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: efs
short_description: create and maintain EFS file systems
description:
- Module allows you to create, search and destroy Amazon EFS file systems
version_added: "2.2"
requirements: [ boto3 ]
author:
- "Ryan Sydnor (@ryansydnor)"
- "Artem Kazakov (@akazakov)"
options:
encrypt:
description:
- A boolean value that, if true, creates an encrypted file system. This cannot be modified after the file
system is created.
required: false
default: false
choices: ['yes', 'no']
version_added: 2.5
kms_key_id:
description:
- The id of the AWS KMS CMK that will be used to protect the encrypted file system. This parameter is only
required if you want to use a non-default CMK. If this parameter is not specified, the default CMK for
Amazon EFS is used. The key id can be Key ID, Key ID ARN, Key Alias or Key Alias ARN.
required: false
version_added: 2.5
purge_tags:
description:
- If yes, existing tags will be purged from the resource to match exactly what is defined by I(tags) parameter. If the I(tags) parameter
is not set then tags will not be modified.
required: false
default: yes
choices: [ 'yes', 'no' ]
version_added: 2.5
state:
description:
- Allows you to create, search and destroy an Amazon EFS file system
required: false
default: 'present'
choices: ['present', 'absent']
name:
description:
- Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
required: false
default: None
id:
description:
- ID of Amazon EFS. Either name or ID required for delete.
required: false
default: None
performance_mode:
description:
- File system's performance mode to use. Only takes effect during creation.
required: false
default: 'general_purpose'
choices: ['general_purpose', 'max_io']
tags:
description:
- "List of tags of Amazon EFS. Should be defined as dictionary
In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
required: false
default: None
targets:
description:
- "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
- subnet_id - Mandatory. The ID of the subnet to add the mount target in.
- ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
- security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as the subnet specified.
This data may be modified for existing EFS using state 'present' and new list of mount targets."
required: false
default: None
wait:
description:
- "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
In case of 'absent' state should wait for EFS 'deleted' life cycle state"
required: false
default: "no"
choices: ["yes", "no"]
wait_timeout:
description:
- How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
required: false
default: 0
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# EFS provisioning
- efs:
state: present
name: myTestEFS
tags:
name: myTestNameTag
purpose: file-storage
targets:
- subnet_id: subnet-748c5d03
security_groups: [ "sg-1a2b3c4d" ]
# Modifying EFS data
- efs:
state: present
name: myTestEFS
tags:
name: myAnotherTestTag
targets:
- subnet_id: subnet-7654fdca
security_groups: [ "sg-4c5d6f7a" ]
# Deleting EFS
- efs:
state: absent
name: myTestEFS
'''
RETURN = '''
creation_time:
description: timestamp of creation date
returned: always
type: string
sample: "2015-11-16 07:30:57-05:00"
creation_token:
description: EFS creation token
returned: always
type: string
sample: "console-88609e04-9a0e-4a2e-912c-feaa99509961"
file_system_id:
description: ID of the file system
returned: always
type: string
sample: "fs-xxxxxxxx"
life_cycle_state:
description: state of the EFS file system
returned: always
type: string
sample: "creating, available, deleting, deleted"
mount_point:
description: url of file system
returned: always
type: string
sample: ".fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/"
mount_targets:
description: list of mount targets
returned: always
type: list
sample:
[
{
"file_system_id": "fs-a7ad440e",
"ip_address": "172.31.17.173",
"life_cycle_state": "available",
"mount_target_id": "fsmt-d8907871",
"network_interface_id": "eni-6e387e26",
"owner_id": "740748460359",
"security_groups": [
"sg-a30b22c6"
],
"subnet_id": "subnet-e265c895"
},
...
]
name:
description: name of the file system
returned: always
type: string
sample: "my-efs"
number_of_mount_targets:
description: the number of targets mounted
returned: always
type: int
sample: 3
owner_id:
description: AWS account ID of EFS owner
returned: always
type: string
sample: "XXXXXXXXXXXX"
size_in_bytes:
description: size of the file system in bytes as of a timestamp
returned: always
type: dict
sample:
{
"timestamp": "2015-12-21 13:59:59-05:00",
"value": 12288
}
performance_mode:
description: performance mode of the file system
returned: always
type: string
sample: "generalPurpose"
tags:
description: tags on the efs instance
returned: always
type: dict
sample:
{
"name": "my-efs",
"key": "Value"
}
'''
from time import sleep
from time import time as timestamp
import traceback
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError as e:
pass # Taken care of by ec2.HAS_BOTO3
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info, ansible_dict_to_boto3_tag_list,
compare_aws_tags, boto3_tag_list_to_ansible_dict)
def _index_by_key(key, items):
return dict((item[key], item) for item in items)
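# For example (illustrative): _index_by_key('SubnetId', [{'SubnetId': 'subnet-a'}])
# returns {'subnet-a': {'SubnetId': 'subnet-a'}}.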
class EFSConnection(object):
DEFAULT_WAIT_TIMEOUT_SECONDS = 0
STATE_CREATING = 'creating'
STATE_AVAILABLE = 'available'
STATE_DELETING = 'deleting'
STATE_DELETED = 'deleted'
def __init__(self, module, region, **aws_connect_params):
self.connection = boto3_conn(module, conn_type='client',
resource='efs', region=region,
**aws_connect_params)
self.module = module
self.region = region
self.wait = module.params.get('wait')
self.wait_timeout = module.params.get('wait_timeout')
def get_file_systems(self, **kwargs):
"""
Returns generator of file systems including all attributes of FS
"""
items = iterate_all(
'FileSystems',
self.connection.describe_file_systems,
**kwargs
)
for item in items:
item['Name'] = item['CreationToken']
item['CreationTime'] = str(item['CreationTime'])
"""
Suffix of network path to be used as NFS device for mount. More detail here:
http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
"""
item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
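# e.g. (illustrative) fs-12345678 in us-east-1 becomes
# '.fs-12345678.efs.us-east-1.amazonaws.com:/'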
if 'Timestamp' in item['SizeInBytes']:
item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
if item['LifeCycleState'] == self.STATE_AVAILABLE:
item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
else:
item['Tags'] = {}
item['MountTargets'] = []
yield item
def get_tags(self, **kwargs):
"""
Returns tag list for selected instance of EFS
"""
tags = self.connection.describe_tags(**kwargs)['Tags']
return tags
def get_mount_targets(self, **kwargs):
"""
Returns mount targets for selected instance of EFS
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
**kwargs
)
for target in targets:
if target['LifeCycleState'] == self.STATE_AVAILABLE:
target['SecurityGroups'] = list(self.get_security_groups(
MountTargetId=target['MountTargetId']
))
else:
target['SecurityGroups'] = []
yield target
def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
)
def get_file_system_id(self, name):
"""
Returns ID of instance by instance name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name
))
return info and info['FileSystemId'] or None
def get_file_system_state(self, name, file_system_id=None):
"""
Returns state of filesystem by EFS id/name
"""
info = first_or_default(iterate_all(
'FileSystems',
self.connection.describe_file_systems,
CreationToken=name,
FileSystemId=file_system_id
))
return info and info['LifeCycleState'] or self.STATE_DELETED
def get_mount_targets_in_state(self, file_system_id, states=None):
"""
        Returns mount targets of the selected EFS, optionally filtered by state(s)
"""
targets = iterate_all(
'MountTargets',
self.connection.describe_mount_targets,
FileSystemId=file_system_id
)
if states:
if not isinstance(states, list):
states = [states]
targets = filter(lambda target: target['LifeCycleState'] in states, targets)
return list(targets)
def create_file_system(self, name, performance_mode, encrypt, kms_key_id):
"""
Creates new filesystem with selected name
"""
changed = False
state = self.get_file_system_state(name)
params = {}
params['CreationToken'] = name
params['PerformanceMode'] = performance_mode
if encrypt:
params['Encrypted'] = encrypt
if kms_key_id is not None:
params['KmsKeyId'] = kms_key_id
if state in [self.STATE_DELETING, self.STATE_DELETED]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED
)
try:
self.connection.create_file_system(**params)
changed = True
except ClientError as e:
self.module.fail_json(msg="Unable to create file system: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to create file system: {0}".format(to_native(e)),
exception=traceback.format_exc())
# we always wait for the state to be available when creating.
# if we try to take any actions on the file system before it's available
# we'll throw errors
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE,
self.wait_timeout
)
return changed
def converge_file_system(self, name, tags, purge_tags, targets):
"""
Change attributes (mount targets and tags) of filesystem by name
"""
result = False
fs_id = self.get_file_system_id(name)
if tags is not None:
tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(self.get_tags(FileSystemId=fs_id)), tags, purge_tags)
if tags_to_delete:
try:
self.connection.delete_tags(
FileSystemId=fs_id,
TagKeys=tags_to_delete
)
except ClientError as e:
self.module.fail_json(msg="Unable to delete tags: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to delete tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
result = True
if tags_need_modify:
try:
self.connection.create_tags(
FileSystemId=fs_id,
Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)
)
except ClientError as e:
self.module.fail_json(msg="Unable to create tags: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
self.module.fail_json(msg="Unable to create tags: {0}".format(to_native(e)),
exception=traceback.format_exc())
result = True
if targets is not None:
incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
current_targets = _index_by_key('SubnetId', self.get_mount_targets(FileSystemId=fs_id))
targets = _index_by_key('SubnetId', targets)
targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
targets, True)
# To modify mount target it should be deleted and created again
changed = [sid for sid in intersection if not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
current_targets[sid], targets[sid])]
targets_to_delete = list(targets_to_delete) + changed
targets_to_create = list(targets_to_create) + changed
if targets_to_delete:
for sid in targets_to_delete:
self.connection.delete_mount_target(
MountTargetId=current_targets[sid]['MountTargetId']
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0
)
result = True
if targets_to_create:
for sid in targets_to_create:
self.connection.create_mount_target(
FileSystemId=fs_id,
**targets[sid]
)
wait_for(
lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
0,
self.wait_timeout
)
result = True
# If no security groups were passed into the module, then do not change it.
security_groups_to_update = [sid for sid in intersection if
'SecurityGroups' in targets[sid] and
current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups']]
if security_groups_to_update:
for sid in security_groups_to_update:
self.connection.modify_mount_target_security_groups(
MountTargetId=current_targets[sid]['MountTargetId'],
SecurityGroups=targets[sid].get('SecurityGroups', None)
)
result = True
return result
def delete_file_system(self, name, file_system_id=None):
"""
Removes EFS instance by id/name
"""
result = False
state = self.get_file_system_state(name, file_system_id)
if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_AVAILABLE
)
if not file_system_id:
file_system_id = self.get_file_system_id(name)
self.delete_mount_targets(file_system_id)
self.connection.delete_file_system(FileSystemId=file_system_id)
result = True
if self.wait:
wait_for(
lambda: self.get_file_system_state(name),
self.STATE_DELETED,
self.wait_timeout
)
return result
def delete_mount_targets(self, file_system_id):
"""
Removes mount targets by EFS id
"""
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
0
)
targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
for target in targets:
self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
wait_for(
lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
0
)
return len(targets) > 0
def iterate_all(attr, map_method, **kwargs):
"""
    Creates an iterator over a (possibly paginated) result set
"""
args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
wait = 1
while True:
try:
data = map_method(**args)
for elm in data[attr]:
yield elm
if 'NextMarker' in data:
                args['Marker'] = data['NextMarker']
continue
break
except ClientError as e:
if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
sleep(wait)
wait = wait * 2
continue
else:
raise
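# Hedged usage sketch of iterate_all (``client`` is a hypothetical boto3 EFS
# client created elsewhere; not part of this module):
#   for fs in iterate_all('FileSystems', client.describe_file_systems):
#       print(fs['FileSystemId'])
# Pagination is followed transparently via the NextMarker/Marker pair above.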
def targets_equal(keys, a, b):
"""
    Compares two mount targets by the specified attributes
"""
for key in keys:
if key in b and a[key] != b[key]:
return False
return True
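# Hedged example of targets_equal semantics: only keys present in the second
# dict are compared, so a key missing from it never causes a mismatch.
#   targets_equal(['SubnetId'], {'SubnetId': 'subnet-1'}, {})                        # -> True
#   targets_equal(['SubnetId'], {'SubnetId': 'subnet-1'}, {'SubnetId': 'subnet-2'})  # -> False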
def dict_diff(dict1, dict2, by_key=False):
"""
Helper method to calculate difference of two dictionaries
"""
keys1 = set(dict1.keys() if by_key else dict1.items())
keys2 = set(dict2.keys() if by_key else dict2.items())
intersection = keys1 & keys2
return keys2 ^ intersection, intersection, keys1 ^ intersection
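# Hedged example of dict_diff with by_key=True (values are illustrative only):
#   dict_diff({'a': 1, 'b': 2}, {'b': 3, 'c': 4}, by_key=True)
#   -> ({'c'}, {'b'}, {'a'})   # keys to create, shared keys, keys to delete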
def first_or_default(items, default=None):
"""
    Helper method to fetch the first element of an iterable (if it exists)
"""
for item in items:
return item
return default
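# Hedged example of first_or_default on an empty and a non-empty iterable:
#   first_or_default([]) -> None        first_or_default([1, 2]) -> 1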
def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
"""
Helper method to wait for desired value returned by callback method
"""
wait_start = timestamp()
while True:
if callback() != value:
if timeout != 0 and (timestamp() - wait_start > timeout):
raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
else:
sleep(5)
continue
break
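# Hedged usage sketch of wait_for as used by EFSConnection (``conn`` and the
# arguments are illustrative): poll every 5 seconds until the callback returns
# the expected value, raising RuntimeError once the optional timeout elapses.
#   wait_for(lambda: conn.get_file_system_state('my-efs'), 'available', 300)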
def main():
"""
Module action handler
"""
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
encrypt=dict(required=False, type="bool", default=False),
state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
kms_key_id=dict(required=False, type='str', default=None),
purge_tags=dict(default=True, type='bool'),
id=dict(required=False, type='str', default=None),
name=dict(required=False, type='str', default=None),
tags=dict(required=False, type="dict", default={}),
targets=dict(required=False, type="list", default=[]),
performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
wait=dict(required=False, type="bool", default=False),
wait_timeout=dict(required=False, type="int", default=0)
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = EFSConnection(module, region, **aws_connect_params)
name = module.params.get('name')
fs_id = module.params.get('id')
tags = module.params.get('tags')
target_translations = {
'ip_address': 'IpAddress',
'security_groups': 'SecurityGroups',
'subnet_id': 'SubnetId'
}
targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
performance_mode_translations = {
'general_purpose': 'generalPurpose',
'max_io': 'maxIO'
}
encrypt = module.params.get('encrypt')
kms_key_id = module.params.get('kms_key_id')
performance_mode = performance_mode_translations[module.params.get('performance_mode')]
purge_tags = module.params.get('purge_tags')
changed = False
state = str(module.params.get('state')).lower()
if state == 'present':
if not name:
module.fail_json(msg='Name parameter is required for create')
changed = connection.create_file_system(name, performance_mode, encrypt, kms_key_id)
changed = connection.converge_file_system(name=name, tags=tags, purge_tags=purge_tags, targets=targets) or changed
result = first_or_default(connection.get_file_systems(CreationToken=name))
elif state == 'absent':
if not name and not fs_id:
module.fail_json(msg='Either name or id parameter is required for delete')
changed = connection.delete_file_system(name, fs_id)
result = None
if result:
result = camel_dict_to_snake_dict(result)
module.exit_json(changed=changed, efs=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Runscope/pysaml2 | src/saml2/mdie.py | 34 | 4757 | #!/usr/bin/env python
from saml2 import element_to_extension_element
from saml2 import extension_elements_to_elements
from saml2 import SamlBase
from saml2 import md
__author__ = 'rolandh'
"""
Functions used to import metadata from and export it to a pysaml2 format
"""
IMP_SKIP = ["_certs", "e_e_", "_extatt"]
EXP_SKIP = ["__class__"]
# From pysaml2 SAML2 metadata format to Python dictionary
def _eval(val, onts, mdb_safe):
"""
Convert a value to a basic dict format
:param val: The value
:param onts: Schemas to be used in the conversion
:return: The basic dictionary
"""
if isinstance(val, basestring):
val = val.strip()
if not val:
return None
else:
return val
elif isinstance(val, dict) or isinstance(val, SamlBase):
return to_dict(val, onts, mdb_safe)
elif isinstance(val, list):
lv = []
for v in val:
if isinstance(v, dict) or isinstance(v, SamlBase):
lv.append(to_dict(v, onts, mdb_safe))
else:
lv.append(v)
return lv
return val
def to_dict(_dict, onts, mdb_safe=False):
"""
Convert a pysaml2 SAML2 message class instance into a basic dictionary
format.
The export interface.
:param _dict: The pysaml2 metadata instance
:param onts: List of schemas to use for the conversion
:return: The converted information
"""
res = {}
if isinstance(_dict, SamlBase):
res["__class__"] = "%s&%s" % (_dict.c_namespace, _dict.c_tag)
for key in _dict.keyswv():
if key in IMP_SKIP:
continue
val = getattr(_dict, key)
if key == "extension_elements":
_eel = extension_elements_to_elements(val, onts)
_val = [_eval(_v, onts, mdb_safe) for _v in _eel]
elif key == "extension_attributes":
if mdb_safe:
_val = dict([(k.replace(".", "__"), v) for k, v in
val.items()])
#_val = {k.replace(".", "__"): v for k, v in val.items()}
else:
_val = val
else:
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe:
key = key.replace(".", "__")
res[key] = _val
else:
for key, val in _dict.items():
_val = _eval(val, onts, mdb_safe)
if _val:
if mdb_safe and "." in key:
key = key.replace(".", "__")
res[key] = _val
return res
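# Hedged round-trip sketch (``ed`` is a hypothetical pysaml2 EntityDescriptor
# instance; ``md`` is the metadata schema module imported above; from_dict is
# defined further down in this file):
#   exported = to_dict(ed, [md])                        # onts: list of schema modules
#   restored = from_dict(exported, {md.NAMESPACE: md})  # onts: dict keyed by namespace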
# From Python dictionary to pysaml2 SAML2 metadata format
def _kwa(val, onts, mdb_safe=False):
"""
Key word argument conversion
:param val: A dictionary
:param onts: dictionary with schemas to use in the conversion
        schema namespace is the key in the dictionary
:return: A converted dictionary
"""
if not mdb_safe:
return dict([(k, from_dict(v, onts)) for k, v in val.items()
if k not in EXP_SKIP])
else:
_skip = ["_id"]
_skip.extend(EXP_SKIP)
return dict([(k.replace("__", "."), from_dict(v, onts)) for k, v in
val.items() if k not in _skip])
def from_dict(val, onts, mdb_safe=False):
"""
Converts a dictionary into a pysaml2 object
:param val: A dictionary
:param onts: Dictionary of schemas to use in the conversion
:return: The pysaml2 object instance
"""
if isinstance(val, dict):
if "__class__" in val:
ns, typ = val["__class__"].split("&")
cls = getattr(onts[ns], typ)
if cls is md.Extensions:
lv = []
for key, ditems in val.items():
if key in EXP_SKIP:
continue
for item in ditems:
ns, typ = item["__class__"].split("&")
cls = getattr(onts[ns], typ)
kwargs = _kwa(item, onts, mdb_safe)
inst = cls(**kwargs)
lv.append(element_to_extension_element(inst))
return lv
else:
kwargs = _kwa(val, onts, mdb_safe)
inst = cls(**kwargs)
return inst
else:
res = {}
for key, v in val.items():
if mdb_safe:
key = key.replace("__", ".")
res[key] = from_dict(v, onts)
return res
elif isinstance(val, basestring):
return val
elif isinstance(val, list):
return [from_dict(v, onts) for v in val]
else:
return val
| bsd-2-clause |
BlindHunter/django | tests/model_formsets_regress/tests.py | 173 | 20725 | from __future__ import unicode_literals
from django import forms
from django.forms.formsets import DELETION_FIELD_NAME, BaseFormSet
from django.forms.models import (
BaseModelFormSet, inlineformset_factory, modelform_factory,
modelformset_factory,
)
from django.forms.utils import ErrorDict, ErrorList
from django.test import TestCase
from django.utils import six
from .models import (
Host, Manager, Network, ProfileNetwork, Restaurant, User, UserProfile,
UserSite,
)
class InlineFormsetTests(TestCase):
def test_formset_over_to_field(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #10243"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=User())
# Now create a new User and UserSite instance
data = {
'serial': '1',
'username': 'apollo13',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '0',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-data': '10',
'usersite_set-0-user': 'apollo13'
}
user = User()
form = Form(data)
if form.is_valid():
user = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 10)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values()
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new UserSite instance
data = {
'usersite_set-TOTAL_FORMS': '2',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '0',
'usersite_set-0-id': six.text_type(usersite[0]['id']),
'usersite_set-0-data': '11',
'usersite_set-0-user': 'apollo13',
'usersite_set-1-data': '42',
'usersite_set-1-user': 'apollo13'
}
form_set = FormSet(data, instance=user)
if form_set.is_valid():
form_set.save()
usersite = UserSite.objects.all().values().order_by('data')
self.assertEqual(usersite[0]['data'], 11)
self.assertEqual(usersite[0]['user_id'], 'apollo13')
self.assertEqual(usersite[1]['data'], 42)
self.assertEqual(usersite[1]['user_id'], 'apollo13')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_formset_over_inherited_model(self):
"A formset over a ForeignKey with a to_field can be saved. Regression for #11120"
Form = modelform_factory(Restaurant, fields="__all__")
FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a form with no data
form = Form()
form_set = FormSet(instance=Restaurant())
# Now create a new Restaurant and Manager instance
data = {
'name': "Guido's House of Pasta",
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '0',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-name': 'Guido Van Rossum'
}
        restaurant = Restaurant()
form = Form(data)
if form.is_valid():
restaurant = form.save()
else:
            self.fail('Errors found on form:%s' % form.errors)
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Guido Van Rossum')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now update the Manager instance
data = {
'manager_set-TOTAL_FORMS': '1',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values()
self.assertEqual(manager[0]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
# Now add a new Manager instance
data = {
'manager_set-TOTAL_FORMS': '2',
'manager_set-INITIAL_FORMS': '1',
'manager_set-MAX_NUM_FORMS': '0',
'manager_set-0-id': six.text_type(manager[0]['id']),
'manager_set-0-name': 'Terry Gilliam',
'manager_set-1-name': 'John Cleese'
}
form_set = FormSet(data, instance=restaurant)
if form_set.is_valid():
form_set.save()
manager = Manager.objects.all().values().order_by('name')
self.assertEqual(manager[0]['name'], 'John Cleese')
self.assertEqual(manager[1]['name'], 'Terry Gilliam')
else:
self.fail('Errors found on formset:%s' % form_set.errors)
def test_inline_model_with_to_field(self):
"""
#13794 --- An inline model with a to_field of a formset with instance
has working relations.
"""
FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',))
user = User.objects.create(username="guido", serial=1337)
UserSite.objects.create(user=user, data=10)
formset = FormSet(instance=user)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.user_id, "guido")
def test_inline_model_with_to_field_to_rel(self):
"""
#13794 --- An inline model with a to_field to a related field of a
formset with instance has working relations.
"""
FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[])
user = User.objects.create(username="guido", serial=1337, pk=1)
self.assertEqual(user.pk, 1)
profile = UserProfile.objects.create(user=user, about="about", pk=2)
self.assertEqual(profile.pk, 2)
ProfileNetwork.objects.create(profile=profile, network=10, identifier=10)
formset = FormSet(instance=profile)
# Testing the inline model's relation
self.assertEqual(formset[0].instance.profile_id, 1)
def test_formset_with_none_instance(self):
"A formset with instance=None can be created. Regression for #11872"
Form = modelform_factory(User, fields="__all__")
FormSet = inlineformset_factory(User, UserSite, fields="__all__")
# Instantiate the Form and FormSet to prove
# you can create a formset with an instance of None
Form(instance=None)
FormSet(instance=None)
def test_empty_fields_on_modelformset(self):
"""
No fields passed to modelformset_factory() should result in no fields
on returned forms except for the id (#14119).
"""
UserFormSet = modelformset_factory(User, fields=())
formset = UserFormSet()
for form in formset.forms:
self.assertIn('id', form.fields)
self.assertEqual(len(form.fields), 1)
def test_save_as_new_with_new_inlines(self):
"""
Existing and new inlines are saved with save_as_new.
Regression for #14938.
"""
efnet = Network.objects.create(name="EFNet")
host1 = Host.objects.create(hostname="irc.he.net", network=efnet)
HostFormSet = inlineformset_factory(Network, Host, fields="__all__")
# Add a new host, modify previous host, and save-as-new
data = {
'host_set-TOTAL_FORMS': '2',
'host_set-INITIAL_FORMS': '1',
'host_set-MAX_NUM_FORMS': '0',
'host_set-0-id': six.text_type(host1.id),
'host_set-0-hostname': 'tranquility.hub.dal.net',
'host_set-1-hostname': 'matrix.de.eu.dal.net'
}
# To save a formset as new, it needs a new hub instance
dalnet = Network.objects.create(name="DALnet")
formset = HostFormSet(data, instance=dalnet, save_as_new=True)
self.assertTrue(formset.is_valid())
formset.save()
self.assertQuerysetEqual(
dalnet.host_set.order_by("hostname"),
["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"]
)
def test_initial_data(self):
user = User.objects.create(username="bibi", serial=1)
UserSite.objects.create(user=user, data=7)
FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__")
formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}])
self.assertEqual(formset.forms[0].initial['data'], 7)
self.assertEqual(formset.extra_forms[0].initial['data'], 41)
self.assertIn('value="42"', formset.extra_forms[1].as_p())
class FormsetTests(TestCase):
def test_error_class(self):
'''
Test the type of Formset and Form error attributes
'''
Formset = modelformset_factory(User, fields="__all__")
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
'form-0-id': '',
'form-0-username': 'apollo13',
'form-0-serial': '1',
'form-1-id': '',
'form-1-username': 'apollo13',
'form-1-serial': '2',
}
formset = Formset(data)
# check if the returned error classes are correct
# note: formset.errors returns a list as documented
self.assertIsInstance(formset.errors, list)
self.assertIsInstance(formset.non_form_errors(), ErrorList)
for form in formset.forms:
self.assertIsInstance(form.errors, ErrorDict)
self.assertIsInstance(form.non_field_errors(), ErrorList)
def test_initial_data(self):
User.objects.create(username="bibi", serial=1)
Formset = modelformset_factory(User, fields="__all__", extra=2)
formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}])
self.assertEqual(formset.forms[0].initial['username'], "bibi")
self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11")
self.assertIn('value="apollo12"', formset.extra_forms[1].as_p())
def test_extraneous_query_is_not_run(self):
Formset = modelformset_factory(Network, fields="__all__")
data = {'test-TOTAL_FORMS': '1',
'test-INITIAL_FORMS': '0',
'test-MAX_NUM_FORMS': '',
'test-0-name': 'Random Place', }
with self.assertNumQueries(1):
formset = Formset(data, prefix="test")
formset.save()
class CustomWidget(forms.widgets.TextInput):
pass
class UserSiteForm(forms.ModelForm):
class Meta:
model = UserSite
fields = "__all__"
widgets = {
'id': CustomWidget,
'data': CustomWidget,
}
localized_fields = ('data',)
class Callback(object):
def __init__(self):
self.log = []
def __call__(self, db_field, **kwargs):
self.log.append((db_field, kwargs))
return db_field.formfield(**kwargs)
class FormfieldCallbackTests(TestCase):
"""
Regression for #13095 and #17683: Using base forms with widgets
defined in Meta should not raise errors and BaseModelForm should respect
the specified pk widget.
"""
def test_inlineformset_factory_default(self):
Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__")
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def test_modelformset_factory_default(self):
Formset = modelformset_factory(UserSite, form=UserSiteForm)
form = Formset().forms[0]
self.assertIsInstance(form['id'].field.widget, CustomWidget)
self.assertIsInstance(form['data'].field.widget, CustomWidget)
self.assertFalse(form.fields['id'].localize)
self.assertTrue(form.fields['data'].localize)
def assertCallbackCalled(self, callback):
id_field, user_field, data_field = UserSite._meta.fields
expected_log = [
(id_field, {'widget': CustomWidget}),
(user_field, {}),
(data_field, {'widget': CustomWidget, 'localize': True}),
]
self.assertEqual(callback.log, expected_log)
def test_inlineformset_custom_callback(self):
callback = Callback()
inlineformset_factory(User, UserSite, form=UserSiteForm,
formfield_callback=callback, fields="__all__")
self.assertCallbackCalled(callback)
def test_modelformset_custom_callback(self):
callback = Callback()
modelformset_factory(UserSite, form=UserSiteForm,
formfield_callback=callback)
self.assertCallbackCalled(callback)
class BaseCustomDeleteFormSet(BaseFormSet):
"""
A formset mix-in that lets a form decide if it's to be deleted.
Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed.
form.should_delete() is called. The formset delete field is also suppressed.
"""
def add_fields(self, form, index):
super(BaseCustomDeleteFormSet, self).add_fields(form, index)
self.can_delete = True
if DELETION_FIELD_NAME in form.fields:
del form.fields[DELETION_FIELD_NAME]
def _should_delete_form(self, form):
return hasattr(form, 'should_delete') and form.should_delete()
class FormfieldShouldDeleteFormTests(TestCase):
"""
Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form
"""
class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet):
""" Model FormSet with CustomDelete MixIn """
class CustomDeleteUserForm(forms.ModelForm):
""" A model form with a 'should_delete' method """
class Meta:
model = User
fields = "__all__"
def should_delete(self):
""" delete form if odd PK """
return self.instance.pk % 2 != 0
NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True)
DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet)
data = {
'form-TOTAL_FORMS': '4',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '4',
'form-0-username': 'John',
'form-0-serial': '1',
'form-1-username': 'Paul',
'form-1-serial': '2',
'form-2-username': 'George',
'form-2-serial': '3',
'form-3-username': 'Ringo',
'form-3-serial': '5',
}
delete_all_ids = {
'form-0-DELETE': '1',
'form-1-DELETE': '1',
'form-2-DELETE': '1',
'form-3-DELETE': '1',
}
def test_init_database(self):
""" Add test data to database via formset """
formset = self.NormalFormset(self.data)
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 4)
def test_no_delete(self):
""" Verify base formset doesn't modify database """
# reload database
self.test_init_database()
# pass standard data dict & see none updated
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 4)
def test_all_delete(self):
""" Verify base formset honors DELETE field """
# reload database
self.test_init_database()
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
data.update(self.delete_all_ids)
formset = self.NormalFormset(data, queryset=User.objects.all())
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 0)
def test_custom_delete(self):
""" Verify DeleteFormset ignores DELETE field and uses form method """
# reload database
self.test_init_database()
# Create formset with custom Delete function
# create data dict with all fields marked for deletion
data = dict(self.data)
data['form-INITIAL_FORMS'] = 4
data.update({
'form-%d-id' % i: user.pk
for i, user in enumerate(User.objects.all())
})
data.update(self.delete_all_ids)
formset = self.DeleteFormset(data, queryset=User.objects.all())
# verify two were deleted
self.assertTrue(formset.is_valid())
self.assertEqual(len(formset.save()), 0)
self.assertEqual(len(User.objects.all()), 2)
# verify no "odd" PKs left
odd_ids = [user.pk for user in User.objects.all() if user.pk % 2]
self.assertEqual(len(odd_ids), 0)
class RedeleteTests(TestCase):
def test_resubmit(self):
u = User.objects.create(username='foo', serial=1)
us = UserSite.objects.create(user=u, data=7)
formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
data = {
'serial': '1',
'username': 'foo',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '1',
'usersite_set-0-id': six.text_type(us.pk),
'usersite_set-0-data': '7',
'usersite_set-0-user': 'foo',
'usersite_set-0-DELETE': '1'
}
formset = formset_cls(data, instance=u)
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
formset = formset_cls(data, instance=u)
# Even if the "us" object isn't in the DB any more, the form
# validates.
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
def test_delete_already_deleted(self):
u = User.objects.create(username='foo', serial=1)
us = UserSite.objects.create(user=u, data=7)
formset_cls = inlineformset_factory(User, UserSite, fields="__all__")
data = {
'serial': '1',
'username': 'foo',
'usersite_set-TOTAL_FORMS': '1',
'usersite_set-INITIAL_FORMS': '1',
'usersite_set-MAX_NUM_FORMS': '1',
'usersite_set-0-id': six.text_type(us.pk),
'usersite_set-0-data': '7',
'usersite_set-0-user': 'foo',
'usersite_set-0-DELETE': '1'
}
formset = formset_cls(data, instance=u)
us.delete()
self.assertTrue(formset.is_valid())
formset.save()
self.assertEqual(UserSite.objects.count(), 0)
| bsd-3-clause |
terbolous/CouchPotatoServer | libs/git/files.py | 122 | 1831 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class ModifiedFile(object):
def __init__(self, filename):
super(ModifiedFile, self).__init__()
self.filename = filename
def __repr__(self):
return self.filename
def __eq__(self, other):
return isinstance(other, ModifiedFile) and other.filename == self.filename
| gpl-3.0 |
moas/carbooking | booking/courses/models.py | 1 | 3802 | from __future__ import unicode_literals
import datetime
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django.db.models import signals
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from cities_light.models import Country, City
from model_utils import FieldTracker
from ..companies.models import Cars
from ..utils.common import CommonFields
# Create your models here.
@python_2_unicode_compatible
class Journey(CommonFields):
customer = models.ForeignKey(
User,
limit_choices_to={
'groups__name': settings.GROUP_CUSTOMER_LABEL,
'is_active': True,
},
on_delete=models.CASCADE,
verbose_name=_('Customer'),
)
country = models.ForeignKey(
Country,
on_delete=models.CASCADE,
verbose_name=_("Country")
)
departure_city = models.ForeignKey(
City,
verbose_name=_('Departure city'),
related_name='departure_point',
help_text=_('Departure must be related to country selected'),
)
departure_address = models.CharField(
_("Departure address"),
max_length=150
)
departure_dt = models.DateTimeField(
_('Start time'),
default=timezone.now() + timezone.timedelta(minutes=15),
)
arrival_city = models.ForeignKey(
City,
verbose_name=_('Arrival city'),
related_name='arrival_point',
help_text=_('Arrival must be related to country selected')
)
arrival_address = models.CharField(
_('Arrival address'),
max_length=150,
)
car = models.ForeignKey(
Cars,
limit_choices_to={'is_active': True, },
verbose_name=_('Car'),
)
is_active = models.BooleanField(
default=True
)
car_tracker = FieldTracker(['car'])
def __str__(self):
return "Journey {}: {}".format(
self.id,
self.customer.get_full_name(),
)
def customer_full_name(self):
return self.customer.get_full_name()
def to(self):
return '{} ({})'.format(
self.departure_city,
self.departure_address,
)
def destination(self):
return '{} ({})'.format(
self.arrival_city,
self.arrival_address,
)
destination.short_description = 'from'
class Meta:
verbose_name = _("journey")
verbose_name_plural = _("List of journey")
def get_absolute_url(self):
from django.core.urlresolvers import reverse
        return reverse('courses:detail-course', args=[self.id])
@classmethod
def reserved_flag(cls, sender, instance, created, **kwargs):
if created is True:
instance.car.is_reserved = True
else:
if instance.car_tracker.has_changed('car') is True:
previous_car = instance.car_tracker.previous('car')
previous_car.is_reserved = False
previous_car.save()
instance.car.is_reserved = instance.is_active
instance.car.save()
@classmethod
def post_delete_handler(cls, sender, instance, **kwargs):
car = instance.car
car.is_reserved = False
car.save()
def clean(self):
if self.car_tracker.has_changed('car') is True:
if self.car.is_reserved is True:
raise ValidationError(
{'car': _('Car selected is already reserved')}
)
signals.post_save.connect(Journey.reserved_flag, sender=Journey)
signals.post_delete.connect(Journey.post_delete_handler, sender=Journey)
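# Hedged sketch of the effect of the handlers wired above (objects are illustrative):
#   journey = Journey.objects.create(customer=user, country=country, ..., car=car)
#   # post_save handler  -> car.is_reserved is now True
#   journey.delete()
#   # post_delete handler -> car.is_reserved is reset to False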
| mit |
thomasvincent/utilities | NagiosPlugins/check_procs/pexpect/examples/chess2.py | 17 | 4026 | #!/usr/bin/env python
'''This demonstrates controlling a screen oriented application (curses).
It starts two instances of gnuchess and then pits them against each other.
'''
import pexpect
import string
import ANSI
import sys, os, time
class Chess:
def __init__(self, engine = "/usr/local/bin/gnuchess -a -h 1"):
self.child = pexpect.spawn (engine)
self.term = ANSI.ANSI ()
#self.child.expect ('Chess')
#if self.child.after != 'Chess':
# raise IOError, 'incompatible chess program'
#self.term.process_list (self.child.before)
#self.term.process_list (self.child.after)
self.last_computer_move = ''
def read_until_cursor (self, r,c, e=0):
'''Eventually something like this should move into the screen class or
a subclass. Maybe a combination of pexpect and screen...
'''
fout = open ('log','a')
while self.term.cur_r != r or self.term.cur_c != c:
try:
k = self.child.read(1, 10)
except Exception, e:
print 'EXCEPTION, (r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c)
sys.stdout.flush()
self.term.process (k)
fout.write ('(r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c))
fout.flush()
if e:
sys.stdout.write (k)
sys.stdout.flush()
if self.term.cur_r == r and self.term.cur_c == c:
fout.close()
return 1
print 'DIDNT EVEN HIT.'
fout.close()
return 1
def expect_region (self):
'''This is another method that would be moved into the
screen class.
'''
pass
def do_scan (self):
fout = open ('log','a')
while 1:
c = self.child.read(1,10)
self.term.process (c)
fout.write ('(r,c):(%d,%d)\n' %(self.term.cur_r, self.term.cur_c))
fout.flush()
sys.stdout.write (c)
sys.stdout.flush()
def do_move (self, move, e = 0):
time.sleep(1)
self.read_until_cursor (19,60, e)
self.child.sendline (move)
def wait (self, color):
while 1:
r = self.term.get_region (14,50,14,60)[0]
r = r.strip()
if r == color:
return
time.sleep (1)
def parse_computer_move (self, s):
i = s.find ('is: ')
cm = s[i+3:i+9]
return cm
def get_computer_move (self, e = 0):
time.sleep(1)
self.read_until_cursor (19,60, e)
time.sleep(1)
r = self.term.get_region (17,50,17,62)[0]
cm = self.parse_computer_move (r)
return cm
def switch (self):
print 'switching'
self.child.sendline ('switch')
def set_depth (self, depth):
self.child.sendline ('depth')
self.child.expect ('depth=')
self.child.sendline ('%d' % depth)
def quit(self):
self.child.sendline ('quit')
def LOG (s):
print s
sys.stdout.flush ()
fout = open ('moves.log', 'a')
fout.write (s + '\n')
fout.close()
print 'Starting...'
black = Chess()
white = Chess()
white.read_until_cursor (19,60,1)
white.switch()
done = 0
while not done:
white.wait ('Black')
move_white = white.get_computer_move(1)
LOG ( 'move white:'+ move_white )
black.do_move (move_white)
black.wait ('White')
move_black = black.get_computer_move()
LOG ( 'move black:'+ move_black )
white.do_move (move_black, 1)
white.quit()
black.quit()
| apache-2.0 |
hdinsight/hue | desktop/core/ext-py/Django-1.6.10/tests/reverse_single_related/tests.py | 150 | 1491 | from __future__ import absolute_import
from django.test import TestCase
from .models import Source, Item
class ReverseSingleRelatedTests(TestCase):
"""
Regression tests for an object that cannot access a single related
object due to a restrictive default manager.
"""
def test_reverse_single_related(self):
public_source = Source.objects.create(is_public=True)
public_item = Item.objects.create(source=public_source)
private_source = Source.objects.create(is_public=False)
private_item = Item.objects.create(source=private_source)
# Only one source is available via all() due to the custom default manager.
self.assertQuerysetEqual(
Source.objects.all(),
["<Source: Source object>"]
)
self.assertEqual(public_item.source, public_source)
# Make sure that an item can still access its related source even if the default
# manager doesn't normally allow it.
self.assertEqual(private_item.source, private_source)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
Source.objects.use_for_related_fields = True
private_item = Item.objects.get(pk=private_item.pk)
self.assertRaises(Source.DoesNotExist, lambda: private_item.source)
| apache-2.0 |
DDelon/youtube-dl | youtube_dl/extractor/extremetube.py | 31 | 3146 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
sanitized_Request,
str_to_int,
)
class ExtremeTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?extremetube\.com/(?:[^/]+/)?video/(?P<id>[^/#?&]+)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
'info_dict': {
'id': 'music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
'view_count': int,
'age_limit': 18,
}
}, {
'url': 'http://www.extremetube.com/gay/video/abcde-1234',
'only_matching': True,
}, {
'url': 'http://www.extremetube.com/video/latina-slut-fucked-by-fat-black-dick',
'only_matching': True,
}, {
'url': 'http://www.extremetube.com/video/652431',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
req = sanitized_Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
webpage, 'uploader', fatal=False)
view_count = str_to_int(self._html_search_regex(
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))
flash_vars = self._parse_json(
self._search_regex(
r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flash vars'),
video_id)
formats = []
for quality_key, video_url in flash_vars.items():
height = int_or_none(self._search_regex(
r'quality_(\d+)[pP]$', quality_key, 'height', default=None))
if not height:
continue
f = {
'url': video_url,
}
mobj = re.search(
r'/(?P<height>\d{3,4})[pP]_(?P<bitrate>\d+)[kK]_\d+', video_url)
if mobj:
height = int(mobj.group('height'))
bitrate = int(mobj.group('bitrate'))
f.update({
'format_id': '%dp-%dk' % (height, bitrate),
'height': height,
'tbr': bitrate,
})
else:
f.update({
'format_id': '%dp' % height,
'height': height,
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'view_count': view_count,
'age_limit': 18,
}
| unlicense |
vigov5/pvp-game | build_question.py | 1 | 1744 | #!venv/bin/python
# -*- coding: utf-8 -*-
import tornado
import tornado.websocket
import tornado.wsgi
import logging
import time
import json
import random
from app import app, db
from app.models import User, Game, Fact, Deck, ROLE_USER, ROLE_ADMIN, get_object_or_404
a = {'あ':'a',
'い':'i',
'う':'u',
'え':'e',
'お':'o',
'か':'ka',
'き':'ki',
'く':'ku',
'け':'ke',
'こ':'ko',
'さ':'sa',
'し':'shi',
'す':'su',
'せ':'se',
'そ':'so',
'た':'ta',
'ち':'chi',
'つ':'tsu',
'て':'te',
'と':'to',
'な':'na',
'に':'ni',
'ぬ':'nu',
'ね':'ne',
'の':'no',
'は':'ha',
'ひ':'hi',
'ふ':'fu',
'へ':'he',
'ほ':'ho',
'ま':'ma',
'み':'mi',
'む':'mu',
'め':'me',
'も':'mo',
'や':'ya',
'ゆ':'yu',
'よ':'yo',
'ら':'ra',
'り':'ri',
'る':'ru',
'れ':'re',
'ろ':'ro',
'わ':'wa',
'を':'o',
'ん':'n',
'が':'ga',
'ぎ':'gi',
'ぐ':'gu',
'げ':'ge',
'ご':'go',
'ざ':'za',
'じ':'ji',
'ず':'zu',
'ぜ':'ze',
'ぞ':'zo',
'だ':'da',
'で':'de',
'ど':'do',
'ば':'ba',
'び':'bi',
'ぶ':'bu',
'べ':'be',
'ぼ':'bo',
'ぱ':'pa',
'ぴ':'pi',
'ぷ':'pu',
'ぺ':'pe',
'ぽ':'po',
'きゃ':'kya',
'きゅ':'kyu',
'きょ':'kyo',
'しゃ':'sha',
'しゅ':'shu',
'しょ':'sho',
'ちゃ':'cha',
'ちゅ':'chu',
'ちょ':'cho',
'にゃ':'nya',
'にゅ':'nyu',
'にょ':'nyo',
'ひゃ':'hya',
'ひゅ':'hyu',
'ひょ':'hyo',
'みゃ':'mya',
'みゅ':'myu',
'みょ':'myo',
'りゃ':'rya',
'りゅ':'ryu',
'りょ':'ryo',
'ぎゃ':'gya',
'ぎゅ':'gyu',
'ぎょ':'gyo',
'じゃ':'ja',
'じゅ':'ju',
'じょ':'jo',
'びゃ':'bya',
'びゅ':'byu',
'びょ':'byo',
'ぴゃ':'pya',
'ぴゅ':'pyu',
'ぴょ':'pyo'}
d = Deck.query.get(1)
for k,v in a.items():
z = Fact(front=k, back=v, deck=d)
db.session.add(z)
db.session.commit()
| mit |
the-c0d3r/CapTipper | CTMagic.py | 11 | 6620 | #
# CapTipper is a malicious HTTP traffic explorer tool
# By Omri Herscovici <omriher AT gmail.com>
# http://omriher.com
# @omriher
#
#
# This file is part of CapTipper, and part of the Whatype library
# Whatype is an independent file type identification python library
# https://github.com/omriher/whatype
#
# CapTipper is a free software under the GPLv3 License
#
import os
class WhatypeErr(Exception):
def __init__(self, when, error):
self.when = when
self.error = error
def __str__(self):
return repr("Whatype Error on " + self.when + " : " + self.error)
class MagicNode(object):
def __init__(self, byte):
self.byte = byte
self.filetype = ""
self.ext = ""
self.strings = ""
self.children = []
def add_child(self, obj):
n = MagicNode(obj)
self.children.append(n)
return n
def has_child(self, data):
for child in self.children:
if child.byte.lower() == data.lower():
return child
return None
def get_childrens_by_byte(self, data):
childrens = []
for child in self.children:
if child.byte.lower() == data.lower():
#return child
childrens.append(child)
return childrens
class Whatype(object):
WTver = "0.1"
WTrev = "01"
MAGICLIST_NAME = "magics.csv"
def __init__(self,magic_file=""):
if magic_file:
if os.path.isfile(magic_file):
self.magic_list_file = magic_file
else:
raise WhatypeErr("magics list load", "Couldn't find " + magic_file)
else:
default_mgc = os.path.join(os.path.dirname(os.path.realpath(__file__)),Whatype.MAGICLIST_NAME)
if os.path.isfile(default_mgc):
self.magic_list_file = default_mgc
else:
raise WhatypeErr("loading default magics list","Couldn't find default magics list. " \
"Please provide a magics CSV file")
# Create main prefix tree graph (Trie)
self.Tree = MagicNode("all_magics")
with open(self.magic_list_file, "r") as ins:
for line in ins:
parts = line.split(",")
# parts[0] = File Type
# parts[1] = Magic bytes
# parts[2] = File Ext
# parts[3] = File Strings
self.create_branch(0, self.Tree, parts[0], parts[1], parts[2],parts[3])
def create_branch(self, node_level, father, filetype, magic, ext, strings):
magic_bytes = magic.split(" ")
byte = magic_bytes[node_level]
son = father.has_child(byte)
node_level += 1
if (node_level < len(magic_bytes)):
if son is None:
son = father.add_child(byte)
self.create_branch(node_level, son, filetype, magic, ext,strings)
else:
if (node_level == len(magic_bytes)):
son = father.add_child(byte)
son.filetype = filetype
son.ext = ext
son.strings = strings
def print_tree(self,Node, index):
for nd in Node.children:
print "--" * index + nd.byte
if (len(nd.children) > 0):
self.print_tree(nd, index + 1)
def strings_search(self,strings_list, content):
bGood = True
for str in strings_list.split(";"):
if content.lower().find(str.lower().rstrip()) == -1:
bGood = False
return bGood
def return_magic(self,cont,Name,Ext):
if not Name:
Name = "Inconclusive. "
if self.istext(cont):
Name += "Probably text"
Ext = "TEXT"
else:
Name += "Probably binary"
Ext = "BINARY"
return Name,Ext
def istext(self,cont):
# Based on http://code.activestate.com/recipes/173220/
import string
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
if not cont:
# Empty files are considered text
return True
if "\0" in cont:
# Files with null bytes are likely binary
return False
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = cont.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/float(len(cont)) > 0.30:
return False
return True
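    # Hedged usage sketch of the public API (file path is hypothetical):
    #   wt = Whatype()                                   # falls back to the bundled magics.csv
    #   name, ext = wt.identify_file('/tmp/sample.bin')
    #   name, ext = wt.identify_buffer(open('/tmp/sample.bin', 'rb').read())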
def find(self, cont, Node, index=0, magic_history=[]):
if cont == "" or cont is None:
return "",""
curr_byte = cont[index].encode('hex')
NextNode = Node.get_childrens_by_byte(curr_byte)
if NextNode:
magic_history.extend(NextNode)
Name, Ext = self.find(cont, NextNode[0], index+1, magic_history)
if Ext == "Rollback":
for i in range(len(magic_history)):
Node = magic_history.pop()
if Node.filetype != "":
if self.strings_search(Node.strings, cont):
return Node.filetype, Node.ext
else:
return Name, Ext
return self.return_magic(cont,"","")
#return ""
else:
# last hex node found
if Node.filetype != "":
if self.strings_search(Node.strings, cont):
return Node.filetype, Node.ext
if len(magic_history) == 0:
#return "",""
return self.return_magic(cont,"","")
return "", "Rollback" # Magic search went too far, rollbacking
def identify_file(self,filepath):
try:
file_content = open(filepath).read()
return self.find(file_content, self.Tree)
except Exception, e:
raise WhatypeErr("file identification", str(e))
def identify_buffer(self,file_content):
try:
return self.find(file_content, self.Tree,0,[])
except Exception, e:
raise WhatypeErr("buffer identification", str(e)) | gpl-3.0 |
DavidNorman/tensorflow | tensorflow/python/kernel_tests/reduction_ops_test_big.py | 30 | 8764 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BaseReductionTest(test.TestCase):
def _tf_reduce(self, x, reduction_axes, keepdims):
raise NotImplementedError()
class BigReductionTest(BaseReductionTest):
"""Test reductions for sum and boolean all over a wide range of shapes."""
def _tf_reduce_max(self, x, reduction_axes, keepdims):
return math_ops.reduce_max(x, reduction_axes, keepdims)
def _tf_reduce_all(self, x, reduction_axes, keepdims):
return math_ops.reduce_all(x, reduction_axes, keepdims)
def _tf_reduce_mean(self, x, reduction_axes, keepdims):
return math_ops.reduce_mean(x, reduction_axes, keepdims)
def _tf_reduce_sum(self, x, reduction_axes, keepdims):
return math_ops.reduce_sum(x, reduction_axes, keepdims)
@test_util.run_deprecated_v1
def testFloat32Sum(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.ones([4097, 4097], dtype=np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.float32) * size_x
row_sum = np.ones([size_x], dtype=np.float32) * size_y
full_sum = np.ones([], dtype=np.float32) * size_x * size_y
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_sum(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_sum(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_sum(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.float32)
sum_xz = np.ones([size_y], dtype=np.float32)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_mean(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_mean(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run([tf_sum_xz, tf_sum_y],
{arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testFloat32Max(self):
# make sure we test all possible kernel invocations
# logic is the same for all ops, test just float32 for brevity
arr_ = np.random.uniform(
low=-3, high=-1, size=[4105, 4105]).astype(np.float32)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_max = np.max(arr, axis=0)
row_max = np.max(arr, axis=1)
full_max = np.max(col_max)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.float32,
shape=(size_x, size_y))
tf_row_max = self._tf_reduce_max(arr_placeholder, 1, False)
tf_col_max = self._tf_reduce_max(arr_placeholder, 0, False)
tf_full_max = self._tf_reduce_max(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_max, tf_col_max, tf_full_max], {arr_placeholder: arr})
self.assertAllClose(col_max, tf_out_col)
self.assertAllClose(row_max, tf_out_row)
self.assertAllClose(full_max, tf_out_full)
arr_ = np.random.uniform(
low=-3, high=-1, size=[130, 130, 130]).astype(np.float32)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.max(arr, axis=1)
sum_xz = np.max(arr, axis=(0, 2))
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.float32, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_max(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_max(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
@test_util.run_deprecated_v1
def testBooleanAll(self):
# make sure we test all possible kernel invocations
# test operation where T(0) is not the identity
arr_ = np.ones([4097, 4097], dtype=np.bool)
for size_x in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
for size_y in [
1, 2, 3, 4, 16, 17, 32, 33, 64, 65, 128, 131, 256, 263, 1024, 1025,
4096, 4097
]:
arr = arr_[0:size_x, 0:size_y]
col_sum = np.ones([size_y], dtype=np.bool)
row_sum = np.ones([size_x], dtype=np.bool)
full_sum = np.ones([1], dtype=np.bool).reshape([])
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(dtype=np.bool,
shape=(size_x, size_y))
tf_row_sum = self._tf_reduce_all(arr_placeholder, 1, False)
tf_col_sum = self._tf_reduce_all(arr_placeholder, 0, False)
tf_full_sum = self._tf_reduce_all(arr_placeholder, [0, 1], False)
tf_out_row, tf_out_col, tf_out_full = sess.run(
[tf_row_sum, tf_col_sum, tf_full_sum], {arr_placeholder: arr})
self.assertAllClose(col_sum, tf_out_col)
self.assertAllClose(row_sum, tf_out_row)
self.assertAllClose(full_sum, tf_out_full)
arr_ = np.ones([130, 130, 130], dtype=np.bool)
for size_x in range(1, 130, 13):
for size_y in range(1, 130, 13):
for size_z in range(1, 130, 13):
arr = arr_[0:size_x, 0:size_y, 0:size_z]
sum_y = np.ones([size_x, size_z], dtype=np.bool)
sum_xz = np.ones([size_y], dtype=np.bool)
with self.session(graph=ops.Graph(), use_gpu=True) as sess:
arr_placeholder = array_ops.placeholder(
dtype=np.bool, shape=(size_x, size_y, size_z))
tf_sum_xz = self._tf_reduce_all(arr_placeholder, [0, 2], False)
tf_sum_y = self._tf_reduce_all(arr_placeholder, 1, False)
tf_out_sum_xz, tf_out_sum_y = sess.run(
[tf_sum_xz, tf_sum_y], {arr_placeholder: arr})
self.assertAllClose(sum_y, tf_out_sum_y)
self.assertAllClose(sum_xz, tf_out_sum_xz)
if __name__ == "__main__":
test.main()
| apache-2.0 |
nikolas/lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/pe/forms.py | 309 | 2272 | # -*- coding: utf-8 -*-
"""
PE-specific Form helpers.
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import RegexField, CharField, Select
from django.utils.translation import ugettext_lazy as _
class PERegionSelect(Select):
"""
A Select widget that uses a list of Peruvian Regions as its choices.
"""
def __init__(self, attrs=None):
from pe_region import REGION_CHOICES
super(PERegionSelect, self).__init__(attrs, choices=REGION_CHOICES)
class PEDNIField(CharField):
"""
    A field that validates 'Documento Nacional de Identidad' (DNI) numbers.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 8 digits."),
}
def __init__(self, *args, **kwargs):
super(PEDNIField, self).__init__(max_length=8, min_length=8, *args,
**kwargs)
def clean(self, value):
"""
        Value must be a string in the XXXXXXXX format.
"""
value = super(PEDNIField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 8:
raise ValidationError(self.error_messages['max_digits'])
return value
class PERUCField(RegexField):
"""
This field validates a RUC (Registro Unico de Contribuyentes). A RUC is of
the form XXXXXXXXXXX.
"""
default_error_messages = {
'invalid': _("This field requires only numbers."),
'max_digits': _("This field requires 11 digits."),
}
def __init__(self, *args, **kwargs):
super(PERUCField, self).__init__(max_length=11, min_length=11, *args,
**kwargs)
def clean(self, value):
"""
Value must be an 11-digit number.
"""
value = super(PERUCField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not value.isdigit():
raise ValidationError(self.error_messages['invalid'])
if len(value) != 11:
raise ValidationError(self.error_messages['max_digits'])
return value
| gpl-3.0 |
SerCeMan/intellij-community | python/lib/Lib/site-packages/django/conf/locale/nn/formats.py | 685 | 1657 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
| apache-2.0 |
slabanja/ase | ase/test/__init__.py | 1 | 3984 | import sys
import unittest
from glob import glob
import numpy as np
class NotAvailable(SystemExit):
def __init__(self, msg, code=0):
SystemExit.__init__(self, (msg,code,))
self.msg = msg
self.code = code
# -------------------------------------------------------------------
# Custom test case/suite for embedding unittests in the test scripts
if sys.version_info < (2, 4, 0, 'final', 0):
class CustomTestCase(unittest.TestCase):
assertTrue = unittest.TestCase.failUnless
assertFalse = unittest.TestCase.failIf
else:
from unittest import TestCase as CustomTestCase
from ase.parallel import paropen
class CustomTextTestRunner(unittest.TextTestRunner):
def __init__(self, logname, descriptions=1, verbosity=1):
self.f = paropen(logname, 'w')
unittest.TextTestRunner.__init__(self, self.f, descriptions, verbosity)
def run(self, test):
stderr_old = sys.stderr
try:
sys.stderr = self.f
testresult = unittest.TextTestRunner.run(self, test)
finally:
sys.stderr = stderr_old
return testresult
# -------------------------------------------------------------------
class ScriptTestCase(unittest.TestCase):
def __init__(self, methodname='testfile', filename=None, display=True):
unittest.TestCase.__init__(self, methodname)
self.filename = filename
self.display = display
def testfile(self):
try:
execfile(self.filename, {'display': self.display})
except KeyboardInterrupt:
raise RuntimeError('Keyboard interrupt')
except NotAvailable, err:
# Only non-zero error codes are failures
if err.code:
raise
def id(self):
return self.filename
def __str__(self):
return '%s (ScriptTestCase)' % self.filename.split('/')[-1]
def __repr__(self):
return "ScriptTestCase(filename='%s')" % self.filename
def test(verbosity=1, dir=None, display=True, stream=sys.stdout):
ts = unittest.TestSuite()
if dir is None:
dir = __path__[0]
tests = glob(dir + '/*.py')
tests.sort()
for test in tests:
if test.endswith('__init__.py'):
continue
if test.endswith('COCu111.py'):
lasttest = test
continue
ts.addTest(ScriptTestCase(filename=test, display=display))
ts.addTest(ScriptTestCase(filename=lasttest, display=display))
from ase.utils import devnull
sys.stdout = devnull
ttr = unittest.TextTestRunner(verbosity=verbosity, stream=stream)
results = ttr.run(ts)
sys.stdout = sys.__stdout__
return results
class World:
"""Class for testing parallelization with MPI"""
def __init__(self, size):
self.size = size
self.data = {}
def get_rank(self, rank):
return CPU(self, rank)
class CPU:
def __init__(self, world, rank):
self.world = world
self.rank = rank
self.size = world.size
def send(self, x, rank):
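        # Single-process stand-in for an MPI send: busy-wait until any previous
        # message to this destination has been consumed, then store the payload.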
while (self.rank, rank) in self.world.data:
pass
self.world.data[(self.rank, rank)] = x
def receive(self, x, rank):
while (rank, self.rank) not in self.world.data:
pass
x[:] = self.world.data.pop((rank, self.rank))
def sum(self, x):
if not isinstance(x, np.ndarray):
x = np.array([x])
self.sum(x)
return x[0]
if self.rank == 0:
y = np.empty_like(x)
for rank in range(1, self.size):
self.receive(y, rank)
x += y
else:
self.send(x, 0)
self.broadcast(x, 0)
def broadcast(self, x, root):
if self.rank == root:
for rank in range(self.size):
if rank != root:
self.send(x, rank)
else:
self.receive(x, root)
| gpl-2.0 |
octavioturra/aritial | google_appengine/lib/django/django/contrib/admin/utils.py | 33 | 3621 | "Misc. utility functions/classes for admin documentation generator."
import re
from email.Parser import HeaderParser
from email.Errors import HeaderParseError
try:
import docutils.core
import docutils.nodes
import docutils.parsers.rst.roles
except ImportError:
docutils_is_available = False
else:
docutils_is_available = True
def trim_docstring(docstring):
"""
Uniformly trims leading/trailing whitespace from docstrings.
Based on http://www.python.org/peps/pep-0257.html#handling-docstring-indentation
"""
if not docstring or not docstring.strip():
return ''
# Convert tabs to spaces and split into lines
lines = docstring.expandtabs().splitlines()
indent = min([len(line) - len(line.lstrip()) for line in lines if line.lstrip()])
trimmed = [lines[0].lstrip()] + [line[indent:].rstrip() for line in lines[1:]]
return "\n".join(trimmed).strip()
def parse_docstring(docstring):
"""
Parse out the parts of a docstring. Returns (title, body, metadata).
"""
docstring = trim_docstring(docstring)
parts = re.split(r'\n{2,}', docstring)
title = parts[0]
if len(parts) == 1:
body = ''
metadata = {}
else:
parser = HeaderParser()
try:
metadata = parser.parsestr(parts[-1])
except HeaderParseError:
metadata = {}
body = "\n\n".join(parts[1:])
else:
metadata = dict(metadata.items())
if metadata:
body = "\n\n".join(parts[1:-1])
else:
body = "\n\n".join(parts[1:])
return title, body, metadata
def parse_rst(text, default_reference_context, thing_being_parsed=None, link_base='../..'):
"""
Convert the string from reST to an XHTML fragment.
"""
overrides = {
'doctitle_xform' : True,
        'initial_header_level' : 3,
"default_reference_context" : default_reference_context,
"link_base" : link_base,
}
if thing_being_parsed:
thing_being_parsed = "<%s>" % thing_being_parsed
parts = docutils.core.publish_parts(text, source_path=thing_being_parsed,
destination_path=None, writer_name='html',
settings_overrides=overrides)
return parts['fragment']
#
# reST roles
#
ROLES = {
'model' : '%s/models/%s/',
'view' : '%s/views/%s/',
'template' : '%s/templates/%s/',
'filter' : '%s/filters/#%s',
'tag' : '%s/tags/#%s',
}
def create_reference_role(rolename, urlbase):
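    # Register a docutils role whose references are rendered as links built from
    # ``urlbase``, the document's link_base setting and the lower-cased reference text.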
def _role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
node = docutils.nodes.reference(rawtext, text, refuri=(urlbase % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
docutils.parsers.rst.roles.register_canonical_role(rolename, _role)
def default_reference_role(name, rawtext, text, lineno, inliner, options=None, content=None):
if options is None: options = {}
if content is None: content = []
context = inliner.document.settings.default_reference_context
node = docutils.nodes.reference(rawtext, text, refuri=(ROLES[context] % (inliner.document.settings.link_base, text.lower())), **options)
return [node], []
if docutils_is_available:
docutils.parsers.rst.roles.register_canonical_role('cmsreference', default_reference_role)
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'cmsreference'
for name, urlbase in ROLES.items():
create_reference_role(name, urlbase)
| apache-2.0 |
lpeska/BRDTI | netlaprls.py | 1 | 2811 | '''
We base this NetLapRLS implementation on the one from the PyDTI project (https://github.com/stephenliu0423/PyDTI); changes were made to the evaluation procedure.
[1] Xia, Zheng, et al. "Semi-supervised drug-protein interaction prediction from heterogeneous biological spaces." BMC systems biology 4.Suppl 2 (2010): S6.
Default parameters:
gamma_d = 0.01, gamma_d=gamma_d2/gamma_d1
gamma_t = 0.01, gamma_t=gamma_p2/gamma_p1
beta_d = 0.3
beta_t = 0.3
'''
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from functions import normalized_discounted_cummulative_gain
class NetLapRLS:
def __init__(self, gamma_d=0.01, gamma_t=0.01, beta_d=0.3, beta_t=0.3):
self.gamma_d = float(gamma_d)
self.gamma_t = float(gamma_t)
self.beta_d = float(beta_d)
self.beta_t = float(beta_t)
def fix_model(self, W, intMat, drugMat, targetMat, seed=None):
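        # W masks the known interactions. Augment the drug/target similarity
        # matrices with interaction-profile kernels (weighted by gamma_d/gamma_t),
        # build their normalized graph Laplacians, and average the two
        # Laplacian-regularized predictions (Xia et al., 2010).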
R = W*intMat
m, n = R.shape
drugMat = (drugMat+drugMat.T)/2
targetMat = (targetMat+targetMat.T)/2
Wd = (drugMat+self.gamma_d*np.dot(R, R.T))/(1.0+self.gamma_d)
Wt = (targetMat+self.gamma_t*np.dot(R.T, R))/(1.0+self.gamma_t)
Wd = Wd-np.diag(np.diag(Wd))
Wt = Wt-np.diag(np.diag(Wt))
D = np.diag(np.sqrt(1.0/np.sum(Wd, axis=1)))
Ld = np.eye(m) - np.dot(np.dot(D, Wd), D)
D = np.diag(np.sqrt(1.0/np.sum(Wt, axis=1)))
Lt = np.eye(n) - np.dot(np.dot(D, Wt), D)
X = np.linalg.inv(Wd+self.beta_d*np.dot(Ld, Wd))
Fd = np.dot(np.dot(Wd, X), R)
X = np.linalg.inv(Wt+self.beta_t*np.dot(Lt, Wt))
Ft = np.dot(np.dot(Wt, X), R.T)
self.predictR = 0.5*(Fd+Ft.T)
def predict_scores(self, test_data, N):
inx = np.array(test_data)
return self.predictR[inx[:, 0], inx[:, 1]]
def evaluation(self, test_data, test_label):
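        # Score the held-out pairs and report AUPR, AUC and nDCG computed for
        # both orderings (drug-target pairs and the transposed target-drug pairs).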
scores = self.predictR[test_data[:, 0], test_data[:, 1]]
self.scores = scores
x, y = test_data[:, 0], test_data[:, 1]
test_data_T = np.column_stack((y,x))
ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores))
ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores))
prec, rec, thr = precision_recall_curve(test_label, scores)
aupr_val = auc(rec, prec)
fpr, tpr, thr = roc_curve(test_label, scores)
auc_val = auc(fpr, tpr)
        # NOTE: we should distinguish here between the nDCGs of inverted and non-inverted methods
return aupr_val, auc_val, ndcg, ndcg_inv
def __str__(self):
return "Model: NetLapRLS, gamma_d:%s, gamma_t:%s, beta_d:%s, beta_t:%s" % (self.gamma_d, self.gamma_t, self.beta_d, self.beta_t)
| gpl-2.0 |
albertodonato/toolrack | toolrack/tests/test_config.py | 1 | 7449 | from operator import attrgetter
import pytest
from ..config import (
Config,
ConfigKey,
ConfigKeyTypes,
InvalidConfigValue,
MissingConfigKey,
)
class TestConfigKeyTypes:
def test_get_converter_unknown_type(self):
"""An error is raised if type is unknown."""
with pytest.raises(TypeError):
ConfigKeyTypes().get_converter("unknown")
@pytest.mark.parametrize(
"conv_type,value,result",
[("int", "10", 10), ("float", "20.30", 20.30), ("str", 10, "10")],
)
def test_types(self, conv_type, value, result):
"""Values can be converted."""
converter = ConfigKeyTypes().get_converter(conv_type)
assert converter(value) == result
@pytest.mark.parametrize(
"value,result",
[
# true values
(3, True),
(["foo"], True),
("true", True),
("True", True),
("yes", True),
("Yes", True),
# false values
(0, False),
([], False),
("false", False),
("no", False),
("foo", False),
("", False),
],
)
def test_bool(self, value, result):
"""Bool values cna be converted."""
converter = ConfigKeyTypes().get_converter("bool")
assert converter(value) == result
@pytest.mark.parametrize("value", [("a", "b"), ["a", "b"], "a b"])
def test_list(self, value):
"""List values are converted to lists."""
converter = ConfigKeyTypes().get_converter("str[]")
assert converter(value) == ["a", "b"]
def test_list_of_ints(self):
"""List values are converted to the propert list type."""
converter = ConfigKeyTypes().get_converter("int[]")
assert converter("1 2") == [1, 2]
def test_list_of_unknown(self):
"""An error is raised if a list of unknown type is requested."""
with pytest.raises(TypeError):
ConfigKeyTypes().get_converter("unknown[]")
class TestConfigKey:
def test_instantiate(self):
"""A ConfigKey has a name."""
config_key = ConfigKey("key", "str")
assert config_key.name == "key"
assert config_key.description == ""
assert config_key.default is None
assert config_key.validator is None
assert not config_key.required
def test_instantiate_with_description(self):
"""A ConfigKey can have a description."""
config_key = ConfigKey("key", "str", description="a config key")
assert config_key.description == "a config key"
def test_instantiate_with_required(self):
"""A ConfigKey can be marked as required."""
config_key = ConfigKey("key", "str", required=True)
assert config_key.required
def test_instantiate_with_default(self):
"""A ConfigKey can have a default value."""
config_key = ConfigKey("key", "str", default=9)
assert config_key.default == 9
def test_instantiate_with_validator(self):
"""A ConfigKey can have a validator."""
validator = object() # just a marker
config_key = ConfigKey("key", "str", validator=validator)
assert config_key.validator is validator
@pytest.mark.parametrize(
"conv_type,value,result",
[
("str", "message", "message"),
("str", 9, "9"),
("int", "100", 100),
("float", "100.3", 100.3),
],
)
def test_parse(self, conv_type, value, result):
"""ConfigKey.parse parses values based on type."""
config_key = ConfigKey("key", conv_type)
assert config_key.parse(value) == result
def test_parse_invalid_value(self):
"""If the type conversion fails, an error is raised."""
config_key = ConfigKey("key", "int")
with pytest.raises(InvalidConfigValue):
config_key.parse("not an int")
def test_parse_with_validator(self):
"""If the validator fails, an error is raised."""
def validator(value):
raise ValueError("Wrong!")
config_key = ConfigKey("key", "str", validator=validator)
with pytest.raises(InvalidConfigValue):
config_key.parse("value")
def test_parse_with_validate(self):
"""If the ConfigKey.validate method fails, an error is raised."""
class ValidatedConfigKey(ConfigKey):
def validate(self, value):
raise ValueError("Wrong!")
config_key = ValidatedConfigKey("key", "str")
with pytest.raises(InvalidConfigValue):
config_key.parse("value")
class TestConfig:
def test_keys(self):
"""Config.keys return a sorted list of ConfigKeys."""
keys = [ConfigKey("foo", "str"), ConfigKey("bar", "str")]
config = Config(*keys)
assert config.keys() == sorted(keys, key=attrgetter("name"))
def test_extend(self):
"""Config.extend returns a new Config with additional keys."""
keys = [ConfigKey("foo", "str"), ConfigKey("bar", "str")]
config = Config(*keys)
new_keys = [ConfigKey("baz", "str"), ConfigKey("bza", "str")]
new_config = config.extend(*new_keys)
assert new_config is not config
all_keys = sorted(keys + new_keys, key=attrgetter("name"))
assert new_config.keys() == all_keys
def test_extend_overwrite(self):
"""Config.extend overwrites configuration keys with the same name."""
config = Config(ConfigKey("foo", "str"))
new_config = config.extend(ConfigKey("foo", "int"))
parsed = new_config.parse({"foo": "4"})
assert parsed == {"foo": 4}
def test_parse_empty(self):
"""If not config options are present, an empty dict is returned."""
config = Config()
assert config.parse({}) == {}
def test_parse_none(self):
"""If None is passed as config, an empty dict is returned."""
config = Config()
assert config.parse(None) == {}
def test_parse_converts_values(self):
"""Config.parse convert key values to their types."""
config = Config(ConfigKey("foo", "int"), ConfigKey("bar", "float"))
parsed = config.parse({"foo": "33", "bar": "20.1"})
assert parsed == {"foo": 33, "bar": 20.1}
def test_parse_unknown_key(self):
"""Config.parse ignores unknown keys."""
config = Config(ConfigKey("foo", "str"), ConfigKey("bar", "str"))
parsed = config.parse({"foo": "Foo", "bar": "Bar", "baz": "9"})
assert parsed == {"foo": "Foo", "bar": "Bar"}
def test_parse_missing_key(self):
"""If a required key is missing, an error is raised."""
config = Config(ConfigKey("foo", "str", required=True))
with pytest.raises(MissingConfigKey):
config.parse({})
def test_parse_invalid_value(self):
"""Config.parse raises an error if a value is invalid."""
config = Config(ConfigKey("foo", "int"), ConfigKey("bar", "float"))
with pytest.raises(InvalidConfigValue):
config.parse({"foo": "33", "bar": "invalid!"})
def test_parse_includes_defaults(self):
"""If a config key is missing, the default value is returned."""
config = Config(ConfigKey("foo", "str"), ConfigKey("bar", "str", default=10))
parsed = config.parse({"foo": "Foo"})
assert parsed == {"foo": "Foo", "bar": 10}
| lgpl-3.0 |
mick-d/nipype | tools/make_examples.py | 10 | 3014 | #!/usr/bin/env python
"""Run the py->rst conversion and run all examples.
This also creates the index.rst file appropriately, makes figures, etc.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import open
from past.builtins import execfile
# -----------------------------------------------------------------------------
# Library imports
# -----------------------------------------------------------------------------
# Stdlib imports
import os
import sys
from glob import glob
# Third-party imports
# We must configure the mpl backend before making any further mpl imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib._pylab_helpers import Gcf
# Local tools
from toollib import *
# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------
examples_header = """
.. _examples:
Examples
========
.. note_about_examples
"""
# -----------------------------------------------------------------------------
# Function definitions
# -----------------------------------------------------------------------------
# These global variables let show() be called by the scripts in the usual
# manner, but when generating examples, we override it to write the figures to
# files with a known name (derived from the script name) plus a counter
figure_basename = None
# We must change the show command to save instead
def show():
allfm = Gcf.get_all_fig_managers()
for fcount, fm in enumerate(allfm):
fm.canvas.figure.savefig('%s_%02i.png' %
(figure_basename, fcount + 1))
_mpl_show = plt.show
plt.show = show
# -----------------------------------------------------------------------------
# Main script
# -----------------------------------------------------------------------------
# Work in examples directory
cd('users/examples')
if not os.getcwd().endswith('users/examples'):
raise OSError('This must be run from doc/examples directory')
# Run the conversion from .py to rst file
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples')
sh('../../../tools/ex2rst --project Nipype --outdir . ../../../examples/frontiers_paper')
# Make the index.rst file
"""
index = open('index.rst', 'w')
index.write(examples_header)
for name in [os.path.splitext(f)[0] for f in glob('*.rst')]:
#Don't add the index in there to avoid sphinx errors and don't add the
#note_about examples again (because it was added at the top):
if name not in(['index','note_about_examples']):
index.write(' %s\n' % name)
index.close()
"""
# Execute each python script in the directory.
if '--no-exec' in sys.argv:
pass
else:
if not os.path.isdir('fig'):
os.mkdir('fig')
for script in glob('*.py'):
figure_basename = pjoin('fig', os.path.splitext(script)[0])
execfile(script)
plt.close('all')
| bsd-3-clause |
lief-project/LIEF | examples/python/authenticode/api_example.py | 1 | 2223 | #!/usr/bin/env python
import lief
import sys
import os
# Parse PE file
pe = lief.parse(sys.argv[1])
sep = (":") if sys.version_info.minor > 7 else ()
# Get authenticode
print(pe.authentihash_md5.hex(*sep)) # 1c:a0:91:53:dc:9a:3a:5f:34:1d:7f:9b:b9:56:69:4d
print(pe.authentihash(lief.PE.ALGORITHMS.SHA_1).hex(*sep)) # 1e:ad:dc:29:1e:db:41:a2:69:c2:ba:ae:4b:fb:9d:31:e7:bb:ab:59
# Check signature according to PKCS #7 and Microsoft documentation
print(pe.verify_signature()) # Return VERIFICATION_FLAGS.OK
bin_ca = None
# Look for the root CA in the PE file
for crt in pe.signatures[0].certificates:
if crt.issuer == "C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Assured ID Root CA":
bin_ca = crt
# Verify CA chain
bundle_path = os.getenv("LIEF_CA_BUNDLE", None) # Path to CA bundle (one can use those from signify:
# signify/certs/authenticode-bundle.pem)
if bundle_path is not None:
# Parse cert bundle and return a list of lief.PE.x509 objects
bundle = lief.PE.x509.parse(bundle_path)
print(bin_ca.is_trusted_by(bundle)) # VERIFICATION_FLAGS.OK
# Get the certificate used by the signer
cert_signer = pe.signatures[0].signers[0].cert
print(cert_signer)
bin_ca.verify(cert_signer) # Verify that cert_signer was signed by the CA
# running with:
# LIEF_CA_BUNDLE=signify/signify/certs/authenticode-bundle.pem python ./authenticode.py avast_free_antivirus_setup_online.exe
#
# 1c:a0:91:53:dc:9a:3a:5f:34:1d:7f:9b:b9:56:69:4d
# 1e:ad:dc:29:1e:db:41:a2:69:c2:ba:ae:4b:fb:9d:31:e7:bb:ab:59
# VERIFICATION_FLAGS.OK
# cert. version : 3
# serial number : 04:09:18:1B:5F:D5:BB:66:75:53:43:B5:6F:95:50:08
# issuer name : C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Assured ID Root CA
# subject name : C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert SHA2 Assured ID Code Signing CA
# issued on : 2013-10-22 12:00:00
# expires on : 2028-10-22 12:00:00
# signed using : RSA with SHA-256
# RSA key size : 2048 bits
# basic constraints : CA=true, max_pathlen=0
# key usage : Digital Signature, Key Cert Sign, CRL Sign
# ext key usage : Code Signing
#
# VERIFICATION_FLAGS.OK
| apache-2.0 |
robclark/chromium | chrome/test/webdriver/test/run_webdriver_tests.py | 9 | 9476 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import optparse
import os
import sys
import types
import unittest
from chromedriver_launcher import ChromeDriverLauncher
import py_unittest_util
import test_paths
# Add the PYTHON_BINDINGS first so that our 'test' module is found instead of
# Python's.
sys.path = [test_paths.PYTHON_BINDINGS] + sys.path
from selenium.webdriver.remote.webdriver import WebDriver
# Implementation inspired from unittest.main()
class Main(object):
"""Main program for running WebDriver tests."""
_options, _args = None, None
TESTS_FILENAME = 'WEBDRIVER_TESTS'
_platform_map = {
'win32': 'win',
'darwin': 'mac',
'linux2': 'linux',
'linux3': 'linux',
}
TEST_PREFIX = 'selenium.test.selenium.webdriver.common.'
def __init__(self):
self._ParseArgs()
self._Run()
def _ParseArgs(self):
"""Parse command line args."""
parser = optparse.OptionParser()
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Output verbosely.')
parser.add_option(
'', '--log-file', type='string', default=None,
help='Provide a path to a file to which the logger will log')
parser.add_option(
'', '--filter', type='string', default='*',
help='Filter for specifying what tests to run, google test style.')
parser.add_option(
'', '--driver-exe', type='string', default=None,
help='Path to the default ChromeDriver executable to use.')
parser.add_option(
'', '--chrome-exe', type='string', default=None,
help='Path to the default Chrome executable to use.')
parser.add_option(
'', '--list', action='store_true', default=False,
help='List tests instead of running them.')
self._options, self._args = parser.parse_args()
# Setup logging - start with defaults
level = logging.WARNING
format = None
if self._options.verbose:
level=logging.DEBUG
format='%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(level=level, format=format,
filename=self._options.log_file)
@staticmethod
def _IsTestClass(obj):
"""Returns whether |obj| is a unittest.TestCase."""
return isinstance(obj, (type, types.ClassType)) and \
issubclass(obj, unittest.TestCase)
@staticmethod
def _GetModuleFromName(test_name):
"""Return the module from the given test name.
Args:
test_name: dot-separated string for a module, a test case or a test
method
Examples: omnibox (a module)
omnibox.OmniboxTest (a test case)
omnibox.OmniboxTest.testA (a test method)
Returns:
tuple with first item corresponding to the module and second item
corresponding to the parts of the name that did not specify the module
Example: _GetModuleFromName('my_module.MyClass.testThis') returns
(my_module, ['MyClass', 'testThis'])
"""
parts = test_name.split('.')
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy: raise
for comp in parts[1:]:
if type(getattr(module, comp)) is not types.ModuleType:
break
module = getattr(module, comp)
return (module, parts[len(parts_copy):])
@staticmethod
def _GetTestsFromName(name):
"""Get a list of all test names from the given string.
Args:
name: dot-separated string for a module, a test case or a test method.
Examples: omnibox (a module)
omnibox.OmniboxTest (a test case)
omnibox.OmniboxTest.testA (a test method)
Returns:
[omnibox.OmniboxTest.testA, omnibox.OmniboxTest.testB, ...]
"""
def _GetTestsFromTestCase(class_obj):
"""Return all test method names from given class object."""
return [class_obj.__name__ + '.' + x for x in dir(class_obj) if
x.startswith('test')]
def _GetTestsFromModule(module):
"""Return all test method names from the given module object."""
tests = []
for name in dir(module):
obj = getattr(module, name)
if Main._IsTestClass(obj):
tests.extend([module.__name__ + '.' + x for x in
_GetTestsFromTestCase(obj)])
return tests
(obj, parts) = Main._GetModuleFromName(name)
for comp in parts:
obj = getattr(obj, comp)
if type(obj) == types.ModuleType:
return _GetTestsFromModule(obj)
elif Main._IsTestClass(obj):
      return [obj.__module__ + '.' + x for x in _GetTestsFromTestCase(obj)]
elif type(obj) == types.UnboundMethodType:
return [name]
else:
logging.warn('No tests in "%s"' % name)
return []
@staticmethod
def _EvalDataFrom(filename):
"""Return eval of python code from given file.
The datastructure used in the file will be preserved.
"""
data_file = os.path.join(filename)
contents = open(data_file).read()
try:
ret = eval(contents, {'__builtins__': None}, None)
except:
print >>sys.stderr, '%s is an invalid data file.' % data_file
raise
return ret
def _GetTestNamesFrom(self, filename):
modules = self._EvalDataFrom(filename)
all_names = modules.get('all', []) + \
modules.get(self._platform_map[sys.platform], [])
args = []
excluded = []
# Find all excluded tests. Excluded tests begin with '-'.
for name in all_names:
if name.startswith('-'): # Exclude
excluded.extend(self._GetTestsFromName(self.TEST_PREFIX + name[1:]))
else:
args.extend(self._GetTestsFromName(self.TEST_PREFIX + name))
for name in excluded:
args.remove(name)
if excluded:
logging.debug('Excluded %d test(s): %s' % (len(excluded), excluded))
return args
def _FakePytestHack(self):
"""Adds a fake 'pytest' module to the system modules.
A single test in text_handling_tests.py depends on the pytest module for
its test skipping capabilities. Without pytest, we can not run any tests
in the text_handling_tests.py module.
We are not sure we want to add pytest to chrome's third party dependencies,
so for now create a fake pytest module so that we can at least import and
run all the tests that do not depend on it. Those depending on it are
disabled.
"""
import imp
sys.modules['pytest'] = imp.new_module('pytest')
sys.modules['pytest'].mark = imp.new_module('mark')
sys.modules['pytest'].mark.ignore_chrome = lambda x: x
def _Run(self):
"""Run the tests."""
# TODO(kkania): Remove this hack.
self._FakePytestHack()
# In the webdriver tree, the python 'test' module is moved under the root
# 'selenium' one for testing. Here we mimic that by setting the 'selenium'
# module's 'test' attribute and adding 'selenium.test' to the system
# modules.
import selenium
import test
selenium.test = test
sys.modules['selenium.test'] = test
# Load and decide which tests to run.
test_names = self._GetTestNamesFrom(
os.path.join(os.path.dirname(__file__), self.TESTS_FILENAME))
all_tests_suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
filtered_suite = py_unittest_util.FilterTestSuite(
all_tests_suite, self._options.filter)
if self._options.list is True:
print '\n'.join(py_unittest_util.GetTestNamesFromSuite(filtered_suite))
sys.exit(0)
# The tests expect to run with preset 'driver' and 'webserver' class
# properties.
driver_exe = self._options.driver_exe or test_paths.CHROMEDRIVER_EXE
chrome_exe = self._options.chrome_exe or test_paths.CHROME_EXE
if driver_exe is None or not os.path.exists(os.path.expanduser(driver_exe)):
raise RuntimeError('ChromeDriver could not be found')
if chrome_exe is None or not os.path.exists(os.path.expanduser(chrome_exe)):
raise RuntimeError('Chrome could not be found')
driver_exe = os.path.expanduser(driver_exe)
chrome_exe = os.path.expanduser(chrome_exe)
# Increase number of http client threads to 10 to prevent hangs.
# The hang seems to occur because Chrome keeps too many multiple
# simultaneous connections open to our webserver.
server = ChromeDriverLauncher(
os.path.expanduser(driver_exe), test_paths.WEBDRIVER_TEST_DATA,
http_threads=10).Launch()
driver = WebDriver(server.GetUrl(),
{'chrome.binary': os.path.expanduser(chrome_exe)})
# The tests expect a webserver. Since ChromeDriver also operates as one,
# just pass this dummy class with the right info.
class DummyWebserver:
pass
webserver = DummyWebserver()
webserver.port = server.GetPort()
for test in py_unittest_util.GetTestsFromSuite(filtered_suite):
test.__class__.driver = driver
test.__class__.webserver = webserver
verbosity = 1
if self._options.verbose:
verbosity = 2
result = py_unittest_util.GTestTextTestRunner(verbosity=verbosity).run(
filtered_suite)
server.Kill()
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
Main()
| bsd-3-clause |
the-engine-room/replication-sprint-02 | crowdataapp/migrations/0009_auto__chg_field_document_stored_validity_rate.py | 1 | 11069 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Document.stored_validity_rate'
db.alter_column(u'crowdataapp_document', 'stored_validity_rate', self.gf('django.db.models.fields.DecimalField')(max_digits=3, decimal_places=2))
def backwards(self, orm):
# Changing field 'Document.stored_validity_rate'
db.alter_column(u'crowdataapp_document', 'stored_validity_rate', self.gf('django.db.models.fields.IntegerField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'crowdataapp.document': {
'Meta': {'object_name': 'Document'},
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'documents'", 'to': u"orm['crowdataapp.DocumentSet']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'stored_validity_rate': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '3', 'decimal_places': '2'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': "'512'"})
},
u'crowdataapp.documentset': {
'Meta': {'object_name': 'DocumentSet'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'entries_threshold': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'head_html': ('django.db.models.fields.TextField', [], {'default': '\'<!-- <script> or <link rel="stylesheet"> tags go here -->\'', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'128'"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
'template_function': ('django.db.models.fields.TextField', [], {'default': "'// Javascript function to insert the document into the DOM.\\n// Receives the URL of the document as its only parameter.\\n// Must be called insertDocument\\n// JQuery is available\\n// resulting element should be inserted into div#document-viewer-container\\nfunction insertDocument(document_url) {\\n}\\n'"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetfieldentry': {
'Meta': {'object_name': 'DocumentSetFieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetFormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
},
u'crowdataapp.documentsetform': {
'Meta': {'object_name': 'DocumentSetForm'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'document_set': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'form'", 'unique': 'True', 'to': u"orm['crowdataapp.DocumentSet']"}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': u"orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'crowdataapp.documentsetformentry': {
'Meta': {'object_name': 'DocumentSetFormEntry'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'form_entries'", 'null': 'True', 'to': u"orm['crowdataapp.Document']"}),
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'crowdataapp.documentsetformfield': {
'Meta': {'object_name': 'DocumentSetFormField'},
'autocomplete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['crowdataapp.DocumentSetForm']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['crowdataapp'] | mit |
VanirAOSP/external_chromium_org | tools/deep_memory_profiler/tests/mock_gsutil.py | 131 | 1558 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import zipfile
def main():
ZIP_PATTERN = re.compile('dmprof......\.zip')
assert len(sys.argv) == 6
assert sys.argv[1] == 'cp'
assert sys.argv[2] == '-a'
assert sys.argv[3] == 'public-read'
assert ZIP_PATTERN.match(os.path.basename(sys.argv[4]))
assert sys.argv[5] == 'gs://test-storage/'
zip_file = zipfile.ZipFile(sys.argv[4], 'r')
expected_nameset = set(['heap.01234.0001.heap',
'heap.01234.0002.heap',
'heap.01234.0001.buckets',
'heap.01234.0002.buckets',
'heap.01234.symmap/maps',
'heap.01234.symmap/chrome.uvwxyz.readelf-e',
'heap.01234.symmap/chrome.abcdef.nm',
'heap.01234.symmap/files.json'])
assert set(zip_file.namelist()) == expected_nameset
heap_1 = zip_file.getinfo('heap.01234.0001.heap')
assert heap_1.CRC == 763099253
assert heap_1.file_size == 1107
buckets_1 = zip_file.getinfo('heap.01234.0001.buckets')
assert buckets_1.CRC == 2632528901
assert buckets_1.file_size == 2146
nm_chrome = zip_file.getinfo('heap.01234.symmap/chrome.abcdef.nm')
assert nm_chrome.CRC == 2717882373
assert nm_chrome.file_size == 131049
zip_file.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
kevinlondon/youtube-dl | youtube_dl/extractor/jeuxvideo.py | 85 | 1990 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class JeuxVideoIE(InfoExtractor):
_VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)\.htm'
_TESTS = [{
'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
'info_dict': {
'id': '114765',
'ext': 'mp4',
'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité',
'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.',
},
}, {
'url': 'http://www.jeuxvideo.com/videos/chroniques/434220/l-histoire-du-jeu-video-la-saturn.htm',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group(1)
webpage = self._download_webpage(url, title)
title = self._html_search_meta('name', webpage)
config_url = self._html_search_regex(
r'data-src="(/contenu/medias/video.php.*?)"',
webpage, 'config URL')
config_url = 'http://www.jeuxvideo.com' + config_url
video_id = self._search_regex(
r'id=(\d+)',
config_url, 'video ID')
config = self._download_json(
config_url, title, 'Downloading JSON config')
formats = [{
'url': source['file'],
'format_id': source['label'],
'resolution': source['label'],
} for source in reversed(config['sources'])]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': config.get('image'),
}
| unlicense |
sacharya/nova | nova/openstack/common/rpc/serializer.py | 72 | 1600 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides the definition of an RPC serialization handler"""
import abc
class Serializer(object):
"""Generic (de-)serialization definition base class."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def serialize_entity(self, context, entity):
"""Serialize something to primitive form.
:param context: Security context
:param entity: Entity to be serialized
:returns: Serialized form of entity
"""
pass
@abc.abstractmethod
def deserialize_entity(self, context, entity):
"""Deserialize something from primitive form.
:param context: Security context
:param entity: Primitive to be deserialized
:returns: Deserialized form of entity
"""
pass
class NoOpSerializer(Serializer):
"""A serializer that does nothing."""
def serialize_entity(self, context, entity):
return entity
def deserialize_entity(self, context, entity):
return entity
| apache-2.0 |
RyanSkraba/beam | sdks/python/apache_beam/io/gcp/bigquery_io_read_it_test.py | 7 | 2252 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Dataflow job that counts the number of rows in a BQ table.
Can be configured to simulate slow reading for a given number of rows.
"""
from __future__ import absolute_import
import logging
import unittest
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.io.gcp import bigquery_io_read_pipeline
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
class BigqueryIOReadIT(unittest.TestCase):
DEFAULT_DATASET = "big_query_import_export"
DEFAULT_TABLE_PREFIX = "export_"
NUM_RECORDS = {"empty": 0,
"1M": 10592,
"1G": 11110839,
"1T": 11110839000,}
def run_bigquery_io_read_pipeline(self, input_size):
test_pipeline = TestPipeline(is_integration_test=True)
pipeline_verifiers = [PipelineStateMatcher(),]
extra_opts = {'input_table': self.DEFAULT_DATASET + "." +
self.DEFAULT_TABLE_PREFIX + input_size,
'num_records': self.NUM_RECORDS[input_size],
'on_success_matcher': all_of(*pipeline_verifiers)}
bigquery_io_read_pipeline.run(test_pipeline.get_full_options_as_args(
**extra_opts))
@attr('IT')
def test_bigquery_read_1M_python(self):
self.run_bigquery_io_read_pipeline('1M')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |
elthariel/dff | ui/console/console.py | 1 | 4149 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2010 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Christophe Malinge <[email protected]>
# Frederic Baguelin <[email protected]>
#
import sys,string, os, traceback, types, completion, signal
import line_to_arguments
from cmd import *
#from api.vfs import *
#from api.taskmanager.taskmanager import TaskManager
from api.manager.manager import ApiManager
from ui.console.complete_raw_input import complete_raw_input
from ui.history import history
PROMPT = "dff / > "
INTRO = "\nWelcome to the Digital Forensic Framework\n"
IDENTCHARS = string.ascii_letters + string.digits + '\ _='
class console(Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
Cmd.__init__(self, completekey, stdin, stdout)
self.history = history()
self.api = ApiManager()
self.vfs = self.api.vfs()
self.taskmanager = self.api.TaskManager()
self.line_to_arguments = line_to_arguments.Line_to_arguments()
self.old_completer = ""
self.prompt = "dff / > "
self.intro = "\n##########################################\n\
# Welcome on Digital Forensics Framework #\n\
##########################################\n"
self.stdin = self
self.completekey = '\t'
self.comp_raw = complete_raw_input(self)
self.completion = completion.Completion(self.comp_raw)
if os.name == 'posix':
signal.signal(signal.SIGTSTP, self.bg)
def bg(self, signum, trace):
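        # SIGTSTP (Ctrl-Z) handler: flag the running task to continue in a
        # background thread and return control to the interactive prompt.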
if self.taskmanager.current_proc:
proc = self.taskmanager.current_proc
proc.exec_flags += ["thread"]
print "\n\n[" + str(proc.pid) + "]" + " background " + proc.name
self.taskmanager.current_proc = None
self.cmdloop()
def precmd(self, line):
return line
def postcmd(self, stop, line):
self.prompt = "dff " + self.vfs.getcwd().path + "/" + self.vfs.getcwd().name + " > "
return stop
def preloop(self):
return
def postloop(self):
print "Exiting..."
def onecmd(self, line):
try:
if line == 'exit' or line == 'quit':
return 'stop'
exc_list = self.line_to_arguments.generate(line)
if exc_list != None and len(exc_list) > 0:
for exc in exc_list:
exec_type = ["console"]
if line[-1:] == "&":
exec_type += ["thread"]
for cmd, args in exc.iteritems():
if cmd != None:
self.history.add(line.strip())
self.taskmanager.add(cmd, args,exec_type)
else:
return self.emptyline()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback, None, sys.stdout)
def emptyline(self):
pass
def default(self, line):
try:
exec(line) in self._locals, self._globals
except Exception, e:
print e.__class__, ":", e
def cmdloop(self, intro=None):
self.preloop()
if self.intro:
print self.intro
self.intro = None
else:
print ''
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
line = self.comp_raw.raw_input()
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
def complete(self, line, begidx):
line = str(line).strip('\n')
self.completion_matches = self.completion.complete(line, begidx)
try:
return self.completion_matches
except IndexError:
return None
| gpl-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-65/16-files/sheets/lib/python2.7/site-packages/pip/utils/logging.py | 516 | 3327 | from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
def get_indentation():
return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = logging.Formatter.format(self, record)
formatted = "".join([
(" " * get_indentation()) + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None):
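        # On Windows, wrap the stream with colorama so ANSI colour escapes are
        # translated into Win32 console calls.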
logging.StreamHandler.__init__(self, stream)
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def should_color(self):
# Don't colorize things if we do not have colorama
if not colorama:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
        # If we have an ANSI term we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
| gpl-3.0 |
kwurst/grading-scripts | assignmentconvert.py | 1 | 1952 | # Copyright (C) 2014 Karl R. Wurst
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
import argparse
import os
from assignment import Assignment
from command import Command
class LabConvert(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('config', help='JSON configuration file')
parser.add_argument(
'-v', '--verbose',
help='increase output verbosity',
action='store_true'
)
args = parser.parse_args()
Command.set_default_verbosity(args.verbose)
self._a2pdf = Command(
'a2pdf --noperl-syntax --noline-numbers "{ins}" -o "{ins}.pdf"')
self._pdfcat = Command('pdftk "{ins}" cat output "{outs}"')
self._create_log = Command('git log > log.txt')
self._rm = Command('rm "{ins}"')
Assignment(args.config).accept(self.process_submission)
def process_submission(self, directory, files):
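        # Render each submitted file plus the git log to PDF, concatenate them
        # into <directory>.pdf, then delete the intermediate files.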
self._create_log()
self._a2pdf.each(files + ['log.txt'])
outpdf = directory.name + '.pdf'
pdfs = [str(f) + '.pdf' for f in files] + [directory/'log.txt.pdf']
self._pdfcat(pdfs, outpdf)
self._rm(pdfs)
self._rm(directory/'log.txt')
if __name__ == '__main__':
LabConvert()
| gpl-3.0 |
anisridhar/AudioShop | analysisClass.py | 1 | 5377 | import pyaudio
import wave
import sys
import time
import cv2
import numpy as np
import os
from Tkinter import *
from pydubtest import play, make_chunks
from pydub import AudioSegment
from threading import Thread
from vidAnalysis import vid2SoundFile
from eventBasedAnimationClass import EventBasedAnimationClass
import imagesDemo1
from buttonClass import button
from TITLEclass import TITLE
from barePageClass import barePage
from audioEditClass import AUDIOEDIT
from fingerTrackingClass import FINGERTRACKING
class ANALYSIS(barePage):
#allows user to compare actual recording and denoised
def __init__(self,width,height):
super(ANALYSIS,self).__init__(width,height)
self.started = False
def initAnalysis(self):
self.next = None
self.song = song1 = AudioSegment.from_wav("originalMusic.wav")
song1 = self.song[1000*self.start:1000*self.end]
self.song2 = song2 = vid2SoundFile(self.start,self.end,self.fingerData)
#initializing audio trackBars
self.bar1 = trackBar(song1,self.width,self.height/3)
self.bar2 = trackBar(song2,self.width,self.height*2/3)
        # timer delay = clip length in ms / track-bar width in px (i.e. ms of audio per pixel of playhead travel)
self.timerDelay = int(round(float(len(song1)/(self.bar1.rightEdge-self.bar1.leftEdge))))
def draw(self,canvas):
canvas.create_rectangle(0,0,self.width,self.height,fill="black")
text1 = "Music from original audio file"
text2 = "Music from Video Analysis"
canvas.create_text(self.width/2,self.height/3-50, text = text1,fill="white")
canvas.create_text(self.width/2,self.height*2/3-50,text=text2,fill="white")
self.bar1.draw(canvas)
self.bar2.draw(canvas)
def onMousePressed(self,event):
self.bar1.onMousePressed(event)
self.bar2.onMousePressed(event)
def onTimerFired(self):
if self.started:
self.bar1.onTimerFired()
self.bar2.onTimerFired()
def onKeyPressed(self,event):
self.bar2.onKeyPressed(event)
if event.keysym == "Right":
self.song = self.song[:1000*self.start] + self.bar2.song + self.song[1000*self.end:]
self.next = 1
class trackBar(object):
#creates a trackbar
def __init__(self,song,width,cy):
self.song = song
self.width = width
self.cy = cy
self.leftEdge = self.width/4
self.rightEdge = 3*self.width/4
self.trackHeight = 30
self.lineHeight = self.trackHeight*2
self.controlWidth = self.trackHeight
self.control = "play"
#self.timerDelay = int(round(float(len(self.song)/(self.rightEdge-self.leftEdge))))
self.trackX = self.leftEdge
self.recordingStart = 0
def onMousePressed(self,event):
if (self.leftEdge-self.controlWidth-5 <= event.x <= self.leftEdge-5 and
self.cy-self.trackHeight/2 <= event.y <= self.cy+self.trackHeight/2):
self.control = "pause" if self.control == "play" else "play"
if self.control == "pause": self.getAudioThread()
elif self.control == "play" and self.trackX == self.rightEdge:
self.recordingStart = 0
self.trackX = self.leftEdge
def getAudioThread(self):
self.t = Thread(target = self.playAudio)
self.t.start()
def playAudio(self):
#taken from source and modified: http://people.csail.mit.edu/hubert/pyaudio/
song = self.song[self.recordingStart:]
#below is taken from a module
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(song.sample_width),
channels=song.channels,
rate=song.frame_rate,
output=True)
        # break audio into half-second chunks (to allow keyboard interrupts)
startTime = time.time()
for chunk in make_chunks(song, 500):
#modified the area below to suit purposes of the program
if self.control == "play":
self.recordingStart += int(round(1000*(time.time() - startTime)))
stream.stop_stream()
stream.close()
p.terminate()
return
stream.write(chunk._data)
self.recordingStart = 0
stream.stop_stream()
stream.close()
p.terminate()
def drawStatusLine(self,canvas):
(x0,y0) = (self.trackX,self.cy-self.lineHeight/2)
(x1,y1) = (self.trackX,self.cy+self.lineHeight/2)
canvas.create_line(x0,y0,x1,y1,fill="white")
def onTimerFired(self):
if self.control == "pause": self.trackX += 1
if self.trackX >= self.rightEdge:
self.trackX = self.rightEdge
def draw(self,canvas):
self.drawBar(canvas)
self.drawStatusLine(canvas)
if self.control == "play": self.drawPlay(canvas)
elif self.control == "pause": self.drawPause(canvas)
def drawBar(self,canvas):
(x0,y0) = (self.leftEdge,self.cy-self.trackHeight/2)
(x1,y1) = (self.rightEdge,self.cy+self.trackHeight/2)
canvas.create_rectangle(x0,y0,x1,y1,fill="blue")
def drawPlay(self,canvas):
v1 = (self.leftEdge-self.controlWidth - 5,self.cy-self.trackHeight/2)
v2 = (self.leftEdge-self.controlWidth-5,self.cy+self.trackHeight/2)
v3 = (self.leftEdge-5,self.cy)
canvas.create_polygon(v1,v2,v3,fill="purple")
def drawPause(self,canvas):
rectangleWidth = self.controlWidth/3
#creating first rectangle
r01 = (x01,y01) = (self.leftEdge-self.controlWidth - 5,self.cy-self.trackHeight/2)
r02 = (x02,y02) = (x01+rectangleWidth,self.cy+self.trackHeight/2)
canvas.create_rectangle(r01,r02,fill="purple")
# creating second rectangle
r11 = (x11,y11) = (x01+2*rectangleWidth-5,y01)
r12 = (x11+rectangleWidth,y02)
canvas.create_rectangle(r11,r12,fill="purple")
def onKeyPressed(self,event):
if event.keysym == "Up":
self.song += 1
elif event.keysym == "Down":
self.song -= 1
| mit |
Cinntax/home-assistant | homeassistant/helpers/state.py | 1 | 8248 | """Helpers that help with state related things."""
import asyncio
import datetime as dt
import json
import logging
from collections import defaultdict
from types import ModuleType, TracebackType
from typing import Awaitable, Dict, Iterable, List, Optional, Tuple, Type, Union
from homeassistant.loader import bind_hass, async_get_integration, IntegrationNotFound
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import ATTR_MESSAGE, SERVICE_NOTIFY
from homeassistant.components.sun import STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON
from homeassistant.components.mysensors.switch import ATTR_IR_CODE, SERVICE_SEND_IR_CODE
from homeassistant.components.cover import ATTR_POSITION, ATTR_TILT_POSITION
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_OPTION,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_LOCK,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_UNLOCK,
SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER,
SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_CLOSED,
STATE_HOME,
STATE_LOCKED,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_OPEN,
STATE_UNKNOWN,
STATE_UNLOCKED,
SERVICE_SELECT_OPTION,
)
from homeassistant.core import Context, State, DOMAIN as HASS_DOMAIN
from homeassistant.util.async_ import run_coroutine_threadsafe
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
GROUP_DOMAIN = "group"
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
SERVICE_NOTIFY: [ATTR_MESSAGE],
SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
SERVICE_SELECT_OPTION: [ATTR_OPTION],
SERVICE_SET_COVER_POSITION: [ATTR_POSITION],
SERVICE_SET_COVER_TILT_POSITION: [ATTR_TILT_POSITION],
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
SERVICE_TURN_ON: STATE_ON,
SERVICE_TURN_OFF: STATE_OFF,
SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
SERVICE_LOCK: STATE_LOCKED,
SERVICE_UNLOCK: STATE_UNLOCKED,
SERVICE_OPEN_COVER: STATE_OPEN,
SERVICE_CLOSE_COVER: STATE_CLOSED,
}
class AsyncTrackStates:
"""
Record the time when the with-block is entered.
Add all states that have changed since the start time to the return list
when with-block is exited.
Must be run within the event loop.
"""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize a TrackStates block."""
self.hass = hass
self.states: List[State] = []
# pylint: disable=attribute-defined-outside-init
def __enter__(self) -> List[State]:
"""Record time from which to track changes."""
self.now = dt_util.utcnow()
return self.states
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
"""Add changes states to changes list."""
self.states.extend(get_changed_since(self.hass.states.async_all(), self.now))
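# Illustrative sketch (not part of Home Assistant): collect the states that
# change while a single service call runs inside the event loop. The "light"
# domain and the "light.kitchen" entity are assumptions made for the example.
async def _demo_track_states(hass: HomeAssistantType) -> List[State]:
    with AsyncTrackStates(hass) as changed:
        await hass.services.async_call(
            "light", SERVICE_TURN_ON, {ATTR_ENTITY_ID: "light.kitchen"}, blocking=True
        )
    return changed  # states whose last_updated is newer than the block's start time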
def get_changed_since(
states: Iterable[State], utc_point_in_time: dt.datetime
) -> List[State]:
"""Return list of states that have been changed since utc_point_in_time."""
return [state for state in states if state.last_updated >= utc_point_in_time]
@bind_hass
def reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
) -> None:
"""Reproduce given state."""
return run_coroutine_threadsafe( # type: ignore
async_reproduce_state(hass, states, blocking), hass.loop
).result()
@bind_hass
async def async_reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
context: Optional[Context] = None,
) -> None:
"""Reproduce a list of states on multiple domains."""
if isinstance(states, State):
states = [states]
to_call: Dict[str, List[State]] = defaultdict(list)
for state in states:
to_call[state.domain].append(state)
async def worker(domain: str, states_by_domain: List[State]) -> None:
try:
integration = await async_get_integration(hass, domain)
except IntegrationNotFound:
_LOGGER.warning(
"Trying to reproduce state for unknown integration: %s", domain
)
return
try:
platform: Optional[ModuleType] = integration.get_platform("reproduce_state")
except ImportError:
platform = None
if platform:
await platform.async_reproduce_states( # type: ignore
hass, states_by_domain, context=context
)
else:
await async_reproduce_state_legacy(
hass, domain, states_by_domain, blocking=blocking, context=context
)
if to_call:
# run all domains in parallel
await asyncio.gather(
*(worker(domain, data) for domain, data in to_call.items())
)
@bind_hass
async def async_reproduce_state_legacy(
hass: HomeAssistantType,
domain: str,
states: Iterable[State],
blocking: bool = False,
context: Optional[Context] = None,
) -> None:
"""Reproduce given state."""
to_call: Dict[Tuple[str, str], List[str]] = defaultdict(list)
if domain == GROUP_DOMAIN:
service_domain = HASS_DOMAIN
else:
service_domain = domain
for state in states:
if hass.states.get(state.entity_id) is None:
_LOGGER.warning(
"reproduce_state: Unable to find entity %s", state.entity_id
)
continue
domain_services = hass.services.async_services().get(service_domain)
if not domain_services:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s (1)", state)
continue
service = None
for _service in domain_services.keys():
if (
_service in SERVICE_ATTRIBUTES
and all(
attr in state.attributes for attr in SERVICE_ATTRIBUTES[_service]
)
or _service in SERVICE_TO_STATE
and SERVICE_TO_STATE[_service] == state.state
):
service = _service
if (
_service in SERVICE_TO_STATE
and SERVICE_TO_STATE[_service] == state.state
):
break
if not service:
_LOGGER.warning("reproduce_state: Unable to reproduce state %s (2)", state)
continue
# We group service calls for entities by service call
# json used to create a hashable version of dict with maybe lists in it
key = (service, json.dumps(dict(state.attributes), sort_keys=True))
to_call[key].append(state.entity_id)
domain_tasks: List[Awaitable[Optional[bool]]] = []
for (service, service_data), entity_ids in to_call.items():
data = json.loads(service_data)
data[ATTR_ENTITY_ID] = entity_ids
domain_tasks.append(
hass.services.async_call(service_domain, service, data, blocking, context)
)
if domain_tasks:
await asyncio.wait(domain_tasks)
def state_as_number(state: State) -> float:
"""
Try to coerce our state to a number.
Raises ValueError if this is not possible.
"""
if state.state in (
STATE_ON,
STATE_LOCKED,
STATE_ABOVE_HORIZON,
STATE_OPEN,
STATE_HOME,
):
return 1
if state.state in (
STATE_OFF,
STATE_UNLOCKED,
STATE_UNKNOWN,
STATE_BELOW_HORIZON,
STATE_CLOSED,
STATE_NOT_HOME,
):
return 0
return float(state.state)
| apache-2.0 |
fxa90id/mozillians | mozillians/users/tests/test_tasks.py | 1 | 11847 | from datetime import datetime
from django.conf import settings
from django.contrib.auth.models import User
from django.test.utils import override_settings
from basket.base import BasketException
from celery.exceptions import Retry
from mock import patch
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.users.tasks import (lookup_user_task, remove_incomplete_accounts,
subscribe_user_task, subscribe_user_to_basket,
unsubscribe_from_basket_task,
unsubscribe_user_task, update_email_in_basket)
from mozillians.users.tests import UserFactory
class IncompleteAccountsTests(TestCase):
"""Incomplete accounts removal tests."""
@patch('mozillians.users.tasks.datetime')
def test_remove_incomplete_accounts(self, datetime_mock):
"""Test remove incomplete accounts."""
complete_user = UserFactory.create(vouched=False,
date_joined=datetime(2012, 01, 01))
complete_vouched_user = UserFactory.create(date_joined=datetime(2013, 01, 01))
incomplete_user_not_old = UserFactory.create(date_joined=datetime(2013, 01, 01),
userprofile={'full_name': ''})
incomplete_user_old = UserFactory.create(date_joined=datetime(2012, 01, 01),
userprofile={'full_name': ''})
datetime_mock.now.return_value = datetime(2013, 01, 01)
remove_incomplete_accounts(days=0)
ok_(User.objects.filter(id=complete_user.id).exists())
ok_(User.objects.filter(id=complete_vouched_user.id).exists())
ok_(User.objects.filter(id=incomplete_user_not_old.id).exists())
ok_(not User.objects.filter(id=incomplete_user_old.id).exists())
class BasketTests(TestCase):
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch('mozillians.users.tasks.BASKET_ENABLED', True)
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.unsubscribe_user_task')
@patch('mozillians.users.tasks.subscribe_user_task')
@patch('mozillians.users.tasks.lookup_user_task')
@patch('mozillians.users.tasks.basket')
def test_change_email(self, basket_mock, lookup_mock, subscribe_mock, unsubscribe_mock,
switch_is_active_mock):
# Create a new user
old_email = '[email protected]'
# We need vouched=False in order to avoid triggering a basket_update through signals.
user = UserFactory.create(email=old_email, vouched=False)
new_email = '[email protected]'
# Enable basket.
switch_is_active_mock.return_value = True
# Mock all the calls to basket.
basket_mock.lookup_user.return_value = {
'email': old_email, # the old value
'newsletters': ['foo', 'bar']
}
basket_mock.unsubscribe.return_value = {
'result': 'ok',
}
basket_mock.subscribe.return_value = {
'token': 'new token',
}
lookup_mock.reset_mock()
subscribe_mock.reset_mock()
unsubscribe_mock.reset_mock()
# When a user's email is changed, their old email is unsubscribed
# from all newsletters related to mozillians.org and their new email is subscribed to them.
update_email_in_basket(user.email, new_email)
# Verify subtask calls and call count
ok_(lookup_mock.subtask.called)
eq_(lookup_mock.subtask.call_count, 1)
ok_(subscribe_mock.subtask.called)
eq_(subscribe_mock.subtask.call_count, 1)
ok_(unsubscribe_mock.subtask.called)
eq_(unsubscribe_mock.subtask.call_count, 1)
# Verify call arguments
lookup_mock.subtask.assert_called_with((user.email,))
unsubscribe_mock.subtask.called_with(({'token': 'new token',
'email': '[email protected]',
'newsletters': ['foo', 'bar']},))
subscribe_mock.subtask.called_with(('[email protected]',))
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.unsubscribe_user_task')
@patch('mozillians.users.tasks.lookup_user_task')
@patch('mozillians.users.tasks.basket')
def test_unsubscribe_from_basket_task(self, basket_mock, lookup_mock, unsubscribe_mock,
switch_is_active_mock):
switch_is_active_mock.return_value = True
user = UserFactory.create(email='[email protected]')
basket_mock.lookup_user.return_value = {
'email': user.email, # the old value
'token': 'token',
'newsletters': ['foo', 'bar']
}
lookup_mock.reset_mock()
unsubscribe_mock.reset_mock()
with patch('mozillians.users.tasks.BASKET_ENABLED', True):
unsubscribe_from_basket_task(user.email, ['foo'])
eq_(lookup_mock.subtask.call_count, 1)
eq_(unsubscribe_mock.subtask.call_count, 1)
lookup_mock.subtask.assert_called_with((user.email,))
unsubscribe_mock.subtask.called_with((['foo'],))
@override_settings(CELERY_ALWAYS_EAGER=True)
@patch('mozillians.users.tasks.BASKET_ENABLED', True)
@patch('mozillians.users.tasks.waffle.switch_is_active')
@patch('mozillians.users.tasks.subscribe_user_task.subtask')
@patch('mozillians.users.tasks.lookup_user_task.subtask')
def test_subscribe_no_newsletters(self, lookup_mock, subscribe_mock, switch_is_active_mock):
switch_is_active_mock.return_value = True
user = UserFactory.create(vouched=False)
result = subscribe_user_to_basket.delay(user.userprofile.pk)
ok_(lookup_mock.called)
ok_(not subscribe_mock.called)
ok_(not result.get())
@patch('mozillians.users.tasks.basket.lookup_user')
def test_lookup_task_user_not_found(self, lookup_mock):
lookup_mock.side_effect = BasketException(u'User not found')
result = lookup_user_task(email='[email protected]')
eq_(result, {})
@patch('mozillians.users.tasks.lookup_user_task.retry')
@patch('mozillians.users.tasks.basket.lookup_user')
def test_lookup_task_basket_error(self, lookup_mock, retry_mock):
exc = BasketException(u'Error error error')
lookup_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
lookup_user_task(email='[email protected]')
retry_mock.called_with(exc)
def test_subscribe_user_task_no_result_no_email(self):
ok_(not subscribe_user_task(result={}, email=''))
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_email_no_newsletters(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'mozilla-phone'],
'email': '[email protected]'
}
subscribe_user_task(result=result, email=None)
subscribe_mock.assert_called_with('[email protected]', ['mozilla-phone'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_newsletters(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
subscribe_user_task(result=result, email='[email protected]')
subscribe_mock.assert_not_called()
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task(self, subscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
kwargs = {
'result': result,
'email': '[email protected]',
'newsletters': ['foobar', 'foo']
}
subscribe_user_task(**kwargs)
subscribe_mock.assert_called_with('[email protected]', ['foobar'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_task_no_result(self, subscribe_mock):
kwargs = {
'result': {'status': 'error',
'desc': u'User not found'},
'email': '[email protected]',
'newsletters': ['mozilla-phone']
}
subscribe_user_task(**kwargs)
subscribe_mock.assert_called_with('[email protected]', ['mozilla-phone'],
sync='N', trigger_welcome='N',
source_url=settings.SITE_URL,
api_key='basket_api_key')
@patch('mozillians.users.tasks.subscribe_user_task.retry')
@patch('mozillians.users.tasks.basket.subscribe')
def test_subscribe_user_basket_error(self, subscribe_mock, retry_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]'
}
kwargs = {
'result': result,
'email': '[email protected]',
'newsletters': ['foobar', 'foo']
}
exc = BasketException(u'Error error error')
subscribe_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
subscribe_user_task(**kwargs)
retry_mock.called_with(exc)
def test_unsubscribe_user_no_result(self):
ok_(not unsubscribe_user_task(result={}))
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_task_success_no_newsletters(self, unsubscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'mozilla-phone'],
'email': '[email protected]',
'token': 'token'
}
unsubscribe_user_task(result)
unsubscribe_mock.assert_called_with(token='token', email='[email protected]',
newsletters=['mozilla-phone'], optout=False)
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_task_success(self, unsubscribe_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar', 'foobar'],
'email': '[email protected]',
'token': 'token'
}
unsubscribe_user_task(result, newsletters=['foo', 'bar'])
unsubscribe_mock.assert_called_with(token='token', email='[email protected]',
newsletters=['foo', 'bar'], optout=False)
@patch('mozillians.users.tasks.unsubscribe_user_task.retry')
@patch('mozillians.users.tasks.basket.unsubscribe')
def test_unsubscribe_user_basket_error(self, unsubscribe_mock, retry_mock):
result = {
'status': 'ok',
'newsletters': ['foo', 'bar'],
'email': '[email protected]',
'token': 'token'
}
exc = BasketException(u'Error error error')
unsubscribe_mock.side_effect = [exc, None]
retry_mock.side_effect = Retry
with self.assertRaises(Retry):
unsubscribe_user_task(result, newsletters=['foo', 'bar'])
retry_mock.called_with(exc)
| bsd-3-clause |
aladagemre/django-guardian | guardian/core.py | 9 | 5191 | from __future__ import unicode_literals
from itertools import chain
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
from guardian.compat import get_user_model
class ObjectPermissionChecker(object):
"""
Generic object permissions checker class being the heart of
``django-guardian``.
.. note::
Once checked for single object, permissions are stored and we don't hit
database again if another check is called for this object. This is great
for templates, views or other request based checks (assuming we don't
have hundreds of permissions on a single object as we fetch all
permissions for checked object).
On the other hand, if we call ``has_perm`` for perm1/object1, then we
change permission state and call ``has_perm`` again for same
perm1/object1 on same instance of ObjectPermissionChecker we won't see a
difference as permissions are already fetched and stored within cache
dictionary.
"""
def __init__(self, user_or_group=None):
"""
:param user_or_group: should be an ``User``, ``AnonymousUser`` or
``Group`` instance
"""
self.user, self.group = get_identity(user_or_group)
self._obj_perms_cache = {}
def has_perm(self, perm, obj):
"""
Checks if user/group has given permission for object.
:param perm: permission as string, may or may not contain app_label
prefix (if not prefixed, we grab app_label from ``obj``)
:param obj: Django model instance for which permission should be checked
"""
perm = perm.split('.')[-1]
if self.user and not self.user.is_active:
return False
elif self.user and self.user.is_superuser:
return True
return perm in self.get_perms(obj)
def get_perms(self, obj):
"""
Returns list of ``codename``'s of all permissions for given ``obj``.
:param obj: Django model instance for which permission should be checked
"""
User = get_user_model()
ctype = ContentType.objects.get_for_model(obj)
key = self.get_local_cache_key(obj)
        if key not in self._obj_perms_cache:
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.permission.field.related_query_name()
if self.user:
fieldname = '%s__group__%s' % (
group_rel_name,
User.groups.field.related_query_name(),
)
group_filters = {fieldname: self.user}
else:
group_filters = {'%s__group' % group_rel_name: self.group}
if group_model.objects.is_generic():
group_filters.update({
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
})
else:
group_filters['%s__content_object' % group_rel_name] = obj
if self.user and not self.user.is_active:
return []
elif self.user and self.user.is_superuser:
perms = list(chain(*Permission.objects
.filter(content_type=ctype)
.values_list("codename")))
elif self.user:
model = get_user_obj_perms_model(obj)
related_name = model.permission.field.related_query_name()
user_filters = {'%s__user' % related_name: self.user}
if model.objects.is_generic():
user_filters.update({
'%s__content_type' % related_name: ctype,
'%s__object_pk' % related_name: obj.pk,
})
else:
user_filters['%s__content_object' % related_name] = obj
perms_qs = Permission.objects.filter(content_type=ctype)
# Query user and group permissions separately and then combine
# the results to avoid a slow query
user_perms_qs = perms_qs.filter(**user_filters)
user_perms = user_perms_qs.values_list("codename", flat=True)
group_perms_qs = perms_qs.filter(**group_filters)
group_perms = group_perms_qs.values_list("codename", flat=True)
perms = list(set(chain(user_perms, group_perms)))
else:
perms = list(set(chain(*Permission.objects
.filter(content_type=ctype)
.filter(**group_filters)
.values_list("codename"))))
self._obj_perms_cache[key] = perms
return self._obj_perms_cache[key]
def get_local_cache_key(self, obj):
"""
Returns cache key for ``_obj_perms_cache`` dict.
"""
ctype = ContentType.objects.get_for_model(obj)
return (ctype.id, obj.pk)
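# Illustrative usage sketch (not part of django-guardian): the checker caches
# the permission list the first time an object is inspected, so later checks
# on the same object do not hit the database again. The 'change_flatpage'
# codename is an assumption made for the example.
def _demo_checker_cache(user_or_group, obj):
    checker = ObjectPermissionChecker(user_or_group)
    allowed = checker.has_perm('change_flatpage', obj)  # first check fetches and caches perms
    codenames = checker.get_perms(obj)                  # answered from the per-object cache
    return allowed, codenames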
| bsd-2-clause |
yograterol/django | tests/auth_tests/test_basic.py | 328 | 4643 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
if kwargs['setting'] == 'AUTH_USER_MODEL':
from django.db.models.manager import ensure_default_manager
# Reset User manager
setattr(User, 'objects', User._default_manager)
ensure_default_manager(User)
apps.clear_cache()
class BasicTestCase(TestCase):
def test_user(self):
"Check that users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertTrue(u.is_authenticated())
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_user_no_email(self):
"Check that users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertEqual(a.pk, None)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertFalse(a.is_authenticated())
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
| bsd-3-clause |
dhermes/google-cloud-python | tasks/noxfile.py | 34 | 4095 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core"))
@nox.session(python="3.7")
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", "black", *LOCAL_DEPS)
session.run(
"black",
"--check",
"google",
"tests",
"docs",
)
session.run("flake8", "google", "tests")
@nox.session(python="3.6")
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install("black")
session.run(
"black",
"google",
"tests",
"docs",
)
@nox.session(python="3.7")
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=97",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=["2.7", "3.5", "3.6", "3.7"])
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=["2.7", "3.7"])
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest")
for local_dep in LOCAL_DEPS:
session.install("-e", local_dep)
session.install("-e", "../test_utils/")
session.install("-e", ".")
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python="3.7")
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
| apache-2.0 |
yestech/gae-django-template | django/contrib/localflavor/fr/forms.py | 309 | 1747 | """
FR-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^0\d(\s|\.)?(\d{2}(\s|\.)?){3}\d{2}$')
class FRZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX.'),
}
def __init__(self, *args, **kwargs):
super(FRZipCodeField, self).__init__(r'^\d{5}$',
max_length=None, min_length=None, *args, **kwargs)
class FRPhoneNumberField(Field):
"""
Validate local French phone number (not international ones)
The correct format is '0X XX XX XX XX'.
'0X.XX.XX.XX.XX' and '0XXXXXXXXX' validate but are corrected to
'0X XX XX XX XX'.
"""
default_error_messages = {
'invalid': _('Phone numbers must be in 0X XX XX XX XX format.'),
}
def clean(self, value):
super(FRPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\.|\s)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s %s %s %s %s' % (value[0:2], value[2:4], value[4:6], value[6:8], value[8:10])
raise ValidationError(self.error_messages['invalid'])
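# Illustrative sketch (not part of Django): dotted or compact input is
# normalised to the spaced form; the sample number below is an assumption.
def _demo_fr_phone_clean():
    field = FRPhoneNumberField()
    return field.clean(u'01.02.03.04.05')  # -> u'01 02 03 04 05'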
class FRDepartmentSelect(Select):
"""
A Select widget that uses a list of FR departments as its choices.
"""
def __init__(self, attrs=None):
from fr_department import DEPARTMENT_ASCII_CHOICES
super(FRDepartmentSelect, self).__init__(attrs, choices=DEPARTMENT_ASCII_CHOICES)
| bsd-3-clause |
msabramo/ansible | lib/ansible/modules/packaging/os/pacman.py | 5 | 15028 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <[email protected]>
# (c) 2015, Indrajit Raychaudhuri <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "'Aaron Bull Schaefer (@elasticdog)' <[email protected]>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
      - When removing a package, force removal without any
        checks. When used with update_cache, force a re-download of the
        repo databases.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: no
choices: ["yes", "no"]
aliases: [ 'update-cache' ]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when upgrade is set to yes
type: list of strings
sample: ['package', 'other-package']
'''
EXAMPLES = '''
# Install package foo
- pacman:
name: foo
state: present
# Upgrade package foo
- pacman:
name: foo
state: latest
update_cache: yes
# Remove packages foo and bar
- pacman:
name: foo,bar
state: absent
# Recursively remove package baz
- pacman:
name: baz
state: absent
recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman:
update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman:
upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
- pacman:
update_cache: yes
upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman:
name: baz
state: absent
force: yes
'''
import shlex
import os
import re
import sys
def get_version(pacman_output):
"""Take pacman -Qi or pacman -Si output and get the Version"""
lines = pacman_output.split('\n')
for line in lines:
if 'Version' in line:
return line.split(':')[1].strip()
return None
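# Illustrative sketch (not part of the module): get_version() simply pulls the
# "Version" field out of `pacman -Qi`/`-Si` style output; the sample text below
# is made up for the example.
def _demo_get_version():
    sample = "Name           : foo\nVersion        : 1.2.3-1\nDescription    : demo package"
    return get_version(sample)  # -> '1.2.3-1'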
def query_package(module, pacman_path, name, state="present"):
"""Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available"""
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False, False
# get the version installed locally (if any)
lversion = get_version(lstdout)
rcmd = "%s -Si %s" % (pacman_path, name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version in the repository
rversion = get_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally, and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion), False
        # package is installed but the remote version could not be fetched; the last True flags the error
return True, True, True
def update_package_db(module, pacman_path):
if module.params["force"]:
args = "Syy"
else:
args = "Sy"
cmd = "%s -%s" % (pacman_path, args)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
regex = re.compile('(\w+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
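        # each line from `pacman -Qu` looks roughly like "linux 5.3.1-1 -> 5.3.2-1" (illustrative sample)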
b = []
a = []
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
def remove_packages(module, pacman_path, packages):
data = []
diff = {
'before': '',
'after': '',
}
if module.params["recurse"] or module.params["force"]:
if module.params["recurse"]:
args = "Rs"
if module.params["force"]:
args = "Rdd"
if module.params["recurse"] and module.params["force"]:
args = "Rdds"
else:
args = "R"
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package)
if not installed:
continue
cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if module._diff:
d = stdout.split('\n')[2].split(' ')[2:]
for i, pkg in enumerate(d):
d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
diff['before'] += "%s\n" % pkg
data.append('\n'.join(d))
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
install_c = 0
package_err = []
message = ""
data = []
diff = {
'before': '',
'after': '',
}
to_install_repos = []
to_install_files = []
for i, package in enumerate(packages):
# if the package is installed and state == present or state == latest and is up-to-date then skip
installed, updated, latestError = query_package(module, pacman_path, package)
if latestError and state == 'latest':
package_err.append(package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if package_files[i]:
to_install_files.append(package_files[i])
else:
to_install_repos.append(package)
if to_install_repos:
cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
install_c += len(to_install_repos)
if to_install_files:
cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
install_c += len(to_install_files)
if state == 'latest' and len(package_err) > 0:
message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
def check_packages(module, pacman_path, packages, state):
would_be_changed = []
diff = {
'before': '',
'after': '',
'before_header': '',
'after_header': ''
}
for package in packages:
installed, updated, unknown = query_package(module, pacman_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
if module._diff and (state == 'removed'):
diff['before_header'] = 'removed'
diff['before'] = '\n'.join(would_be_changed) + '\n'
elif module._diff and ((state == 'present') or (state == 'latest')):
diff['after_header'] = 'installed'
diff['after'] = '\n'.join(would_be_changed) + '\n'
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state), diff=diff)
else:
module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
def expand_package_groups(module, pacman_path, pkgs):
expanded = []
for pkg in pkgs:
cmd = "%s -Sgq %s" % (pacman_path, pkg)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
# A group was found matching the name, so expand it
for name in stdout.split('\n'):
name = name.strip()
if name:
expanded.append(name)
else:
expanded.append(pkg)
return expanded
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg', 'package'], type='list'),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
recurse = dict(default=False, type='bool'),
force = dict(default=False, type='bool'),
upgrade = dict(default=False, type='bool'),
update_cache = dict(default=False, aliases=['update-cache'], type='bool')
),
required_one_of = [['name', 'update_cache', 'upgrade']],
supports_check_mode = True)
pacman_path = module.get_bin_path('pacman', True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p["update_cache"] and not module.check_mode:
update_package_db(module, pacman_path)
if not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Updated the package master lists')
if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Would have updated the package cache')
if p['upgrade']:
upgrade(module, pacman_path)
if p['name']:
pkgs = expand_package_groups(module, pacman_path, p['name'])
pkg_files = []
for i, pkg in enumerate(pkgs):
if re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
# The package given is a filename, extract the raw pkg name from
# it and store the filename
pkg_files.append(pkg)
pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
else:
pkg_files.append(None)
if module.check_mode:
check_packages(module, pacman_path, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
elif p['state'] == 'absent':
remove_packages(module, pacman_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 |
rishibarve/incubator-airflow | tests/jobs.py | 1 | 61326 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import shutil
import unittest
import six
import socket
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDagBag
from mock import patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
"""
# some DAGs really are just examples... but try to make them work!
skip_dags = [
'example_http_operator',
'example_twitter_dag',
'example_trigger_target_dag',
'example_trigger_controller_dag', # tested above
'test_utils', # sleeps forever
]
logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
Test for https://github.com/airbnb/airflow/pull/1225
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = datetime.datetime.now()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
@patch.object(LocalTaskJob, "_is_descendant_process")
def test_localtaskjob_heartbeat(self, is_descendant):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
is_descendant.return_value = True
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
is_descendant.return_value = False
self.assertRaises(AirflowException, job1.heartbeat_callback)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = socket.getfqdn()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run, ignore_ti_state=True, executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.run)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
def setUp(self):
self.dagbag = DagBag()
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def test_concurrency(self):
dag_id = 'SchedulerJobTest.test_concurrency'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = SimpleDagBag([dag])
scheduler = SchedulerJob(**self.default_scheduler_args)
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING], session=session))
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING, State.QUEUED], session=session))
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
session.close()
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
processor.get_last_finish_time.return_value = None
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler._execute_helper(processor_manager=processor)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob(**self.default_scheduler_args)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob(**self.default_scheduler_args)
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
session = settings.Session()
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there
# That behavior still exists, but now it will only do so if after the
# start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
file_process_interval=0,
processor_poll_interval=0.5,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=datetime.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=datetime.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached and dagrun_timeout is not reached
Test that a dagrun will be scheduled if max_active_runs has been reached but dagrun_timeout is also reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = mock.Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = SimpleDagBag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, ti) = ti_tuple
ti.task = dag_task1
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# now that the executor has cleared, the task should be allowed to re-queue
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
TI.task_id==dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = datetime.datetime.now()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration,
**self.default_scheduler_args)
scheduler.run()
end_time = datetime.datetime.now()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1,
**self.default_scheduler_args)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = datetime.datetime.now()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
now = datetime.datetime.now()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
three_minutes_ago = now - datetime.timedelta(minutes=3)
two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'no_catchup_test1'
DAG_NAME2 = 'no_catchup_test2'
DAG_NAME3 = 'no_catchup_test3'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
# Test configs have catchup by default ON
self.assertEqual(default_catchup, True)
# Correct default?
self.assertEqual(dag1.catchup, True)
dag2 = DAG(DAG_NAME2,
schedule_interval='* * * * *',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag2.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag2.clear()
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 3 minutes, not 6 hours ago
self.assertGreater(dr.execution_date, three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, datetime.datetime.now())
dag3 = DAG(DAG_NAME3,
schedule_interval='@hourly',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag3.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag3.clear()
dr = None
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last two hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, datetime.datetime.now())
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
| apache-2.0 |
leductan-nguyen/RaionPi | src/octoprint/plugin/core.py | 1 | 42607 | # coding=utf-8
"""
In this module reside the core data structures and logic of the plugin system. It is implemented in a RaionPi-agnostic
way and could be extracted into a separate Python module in the future.
.. autoclass:: PluginManager
:members:
.. autoclass:: PluginInfo
:members:
.. autoclass:: Plugin
:members:
"""
from __future__ import absolute_import
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import os
import imp
from collections import defaultdict, namedtuple
import logging
import pkg_resources
import pkginfo
EntryPointOrigin = namedtuple("EntryPointOrigin", "type, entry_point, module_name, package_name, package_version")
FolderOrigin = namedtuple("FolderOrigin", "type, folder")
class PluginInfo(object):
"""
The :class:`PluginInfo` class wraps all available information about a registered plugin.
This includes its meta data (like name, description, version, etc) as well as the actual plugin extensions like
implementations, hooks and helpers.
It works on Python module objects and extracts the relevant data from those via accessing the
:ref:`control properties <sec-plugin-concepts-controlproperties>`.
Arguments:
key (str): Identifier of the plugin
location (str): Installation folder of the plugin
instance (module): Plugin module instance
name (str): Human readable name of the plugin
version (str): Version of the plugin
description (str): Description of the plugin
author (str): Author of the plugin
url (str): URL of the website of the plugin
license (str): License of the plugin
"""
attr_name = '__plugin_name__'
""" Module attribute from which to retrieve the plugin's human readable name. """
attr_description = '__plugin_description__'
""" Module attribute from which to retrieve the plugin's description. """
attr_version = '__plugin_version__'
""" Module attribute from which to retrieve the plugin's version. """
attr_author = '__plugin_author__'
""" Module attribute from which to retrieve the plugin's author. """
attr_url = '__plugin_url__'
""" Module attribute from which to retrieve the plugin's website URL. """
attr_license = '__plugin_license__'
""" Module attribute from which to retrieve the plugin's license. """
attr_hooks = '__plugin_hooks__'
""" Module attribute from which to retrieve the plugin's provided hooks. """
attr_implementation = '__plugin_implementation__'
""" Module attribute from which to retrieve the plugin's provided mixin implementation. """
attr_implementations = '__plugin_implementations__'
"""
Module attribute from which to retrieve the plugin's provided implementations.
This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_implementation`. Only the
first entry will be evaluated.
.. deprecated:: 1.2.0-dev-694
Use :attr:`attr_implementation` instead.
"""
attr_helpers = '__plugin_helpers__'
""" Module attribute from which to retrieve the plugin's provided helpers. """
attr_check = '__plugin_check__'
""" Module attribute which to call to determine if the plugin can be loaded. """
attr_init = '__plugin_init__'
"""
Module attribute which to call when loading the plugin.
This deprecated attribute will only be used if a plugin does not yet offer :attr:`attr_load`.
.. deprecated:: 1.2.0-dev-720
Use :attr:`attr_load` instead.
"""
attr_load = '__plugin_load__'
""" Module attribute which to call when loading the plugin. """
attr_unload = '__plugin_unload__'
""" Module attribute which to call when unloading the plugin. """
attr_enable = '__plugin_enable__'
""" Module attribute which to call when enabling the plugin. """
attr_disable = '__plugin_disable__'
""" Module attribute which to call when disabling the plugin. """
def __init__(self, key, location, instance, name=None, version=None, description=None, author=None, url=None, license=None):
self.key = key
self.location = location
self.instance = instance
self.origin = None
self.enabled = True
self.bundled = False
self.loaded = False
self._name = name
self._version = version
self._description = description
self._author = author
self._url = url
self._license = license
def validate(self, phase, additional_validators=None):
if phase == "before_load":
# if the plugin still uses __plugin_init__, log a deprecation warning and move it to __plugin_load__
if hasattr(self.instance, self.__class__.attr_init):
if not hasattr(self.instance, self.__class__.attr_load):
# deprecation warning
import warnings
warnings.warn("{name} uses deprecated control property __plugin_init__, use __plugin_load__ instead".format(name=self.key), DeprecationWarning)
# move it
init = getattr(self.instance, self.__class__.attr_init)
setattr(self.instance, self.__class__.attr_load, init)
# delete __plugin_init__
delattr(self.instance, self.__class__.attr_init)
elif phase == "after_load":
# if the plugin still uses __plugin_implementations__, log a deprecation warning and put the first
# item into __plugin_implementation__
if hasattr(self.instance, self.__class__.attr_implementations):
if not hasattr(self.instance, self.__class__.attr_implementation):
# deprecation warning
import warnings
warnings.warn("{name} uses deprecated control property __plugin_implementations__, use __plugin_implementation__ instead - only the first implementation of {name} will be recognized".format(name=self.key), DeprecationWarning)
# put first item into __plugin_implementation__
implementations = getattr(self.instance, self.__class__.attr_implementations)
if len(implementations) > 0:
setattr(self.instance, self.__class__.attr_implementation, implementations[0])
# delete __plugin_implementations__
delattr(self.instance, self.__class__.attr_implementations)
if additional_validators is not None:
for validator in additional_validators:
validator(phase, self)
def __str__(self):
if self.version:
return "{name} ({version})".format(name=self.name, version=self.version)
else:
return self.name
def long_str(self, show_bundled=False, bundled_strs=(" [B]", ""),
show_location=False, location_str=" - {location}",
show_enabled=False, enabled_strs=("* ", " ")):
"""
Long string representation of the plugin's information. Will return a string of the format ``<enabled><str(self)><bundled><location>``.
``enabled``, ``bundled`` and ``location`` will only be displayed if the corresponding flags are set to ``True``.
These will be filled from ``enabled_strs``, ``bundled_strs`` and ``location_str`` as follows:
``enabled_strs``
a 2-tuple, the first entry being the string to insert when the plugin is enabled, the second
entry the string to insert when it is not.
``bundled_strs``
a 2-tuple, the first entry being the string to insert when the plugin is bundled, the second
entry the string to insert when it is not.
``location_str``
a format string (to be parsed with ``str.format``), the ``{location}`` placeholder will be
replaced with the plugin's installation folder on disk.
Arguments:
show_enabled (boolean): whether to show the ``enabled`` part
enabled_strs (tuple): the 2-tuple containing the two possible strings to use for displaying the enabled state
show_bundled (boolean): whether to show the ``bundled`` part
bundled_strs(tuple): the 2-tuple containing the two possible strings to use for displaying the bundled state
show_location (boolean): whether to show the ``location`` part
location_str (str): the format string to use for displaying the plugin's installation location
Returns:
str: The long string representation of the plugin as described above
"""
if show_enabled:
ret = enabled_strs[0] if self.enabled else enabled_strs[1]
else:
ret = ""
ret += str(self)
if show_bundled:
ret += bundled_strs[0] if self.bundled else bundled_strs[1]
if show_location and self.location:
ret += location_str.format(location=self.location)
return ret
def get_hook(self, hook):
"""
Arguments:
hook (str): Hook to return.
Returns:
callable or None: Handler for the requested ``hook`` or None if no handler is registered.
"""
if not hook in self.hooks:
return None
return self.hooks[hook]
def get_implementation(self, *types):
"""
Arguments:
types (list): List of :class:`Plugin` sub classes all returned implementations need to implement.
Returns:
object: The plugin's implementation if it matches all of the requested ``types``, None otherwise.
"""
if not self.implementation:
return None
for t in types:
if not isinstance(self.implementation, t):
return None
return self.implementation
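# Usage sketch (the mixin types passed here are hypothetical placeholders, not
# types defined in this module):
#
#     impl = plugin_info.get_implementation(SomeMixin, AnotherMixin)
#     if impl is not None:
#         pass  # the implementation satisfies both requested mixins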
@property
def name(self):
"""
Human readable name of the plugin. Will be taken from the name attribute of the plugin module if available,
otherwise from the ``name`` supplied during construction with a fallback to ``key``.
Returns:
str: Name of the plugin, fallback is the plugin's identifier.
"""
return self._get_instance_attribute(self.__class__.attr_name, defaults=(self._name, self.key))
@property
def description(self):
"""
Description of the plugin. Will be taken from the description attribute of the plugin module as defined in
:attr:`attr_description` if available, otherwise from the ``description`` supplied during construction.
May be None.
Returns:
str or None: Description of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_description, default=self._description)
@property
def version(self):
"""
Version of the plugin. Will be taken from the version attribute of the plugin module as defined in
:attr:`attr_version` if available, otherwise from the ``version`` supplied during construction. May be None.
Returns:
str or None: Version of the plugin.
"""
return self._version if self._version is not None else self._get_instance_attribute(self.__class__.attr_version, default=self._version)
@property
def author(self):
"""
Author of the plugin. Will be taken from the author attribute of the plugin module as defined in
:attr:`attr_author` if available, otherwise from the ``author`` supplied during construction. May be None.
Returns:
str or None: Author of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_author, default=self._author)
@property
def url(self):
"""
Website URL for the plugin. Will be taken from the url attribute of the plugin module as defined in
:attr:`attr_url` if available, otherwise from the ``url`` supplied during construction. May be None.
Returns:
str or None: Website URL for the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_url, default=self._url)
@property
def license(self):
"""
License of the plugin. Will be taken from the license attribute of the plugin module as defined in
:attr:`attr_license` if available, otherwise from the ``license`` supplied during construction. May be None.
Returns:
str or None: License of the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_license, default=self._license)
@property
def hooks(self):
"""
Hooks provided by the plugin. Will be taken from the hooks attribute of the plugin module as defined in
:attr:`attr_hooks` if available, otherwise an empty dictionary is returned.
Returns:
dict: Hooks provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_hooks, default={})
@property
def implementation(self):
"""
Implementation provided by the plugin. Will be taken from the implementation attribute of the plugin module
as defined in :attr:`attr_implementation` if available, otherwise None is returned.
Returns:
object: Implementation provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_implementation, default=None)
@property
def helpers(self):
"""
Helpers provided by the plugin. Will be taken from the helpers attribute of the plugin module as defined in
:attr:`attr_helpers` if available, otherwise an empty list is returned.
Returns:
dict: Helpers provided by the plugin.
"""
return self._get_instance_attribute(self.__class__.attr_helpers, default={})
@property
def check(self):
"""
Method for pre-load check of plugin. Will be taken from the check attribute of the plugin module as defined in
:attr:`attr_check` if available, otherwise a lambda always returning True is returned.
Returns:
callable: Check method for the plugin module which should return True if the plugin can be loaded, False
otherwise.
"""
return self._get_instance_attribute(self.__class__.attr_check, default=lambda: True)
@property
def load(self):
"""
Method for loading the plugin module. Will be taken from the load attribute of the plugin module as defined
in :attr:`attr_load` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Load method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_load, default=lambda: True)
@property
def unload(self):
"""
Method for unloading the plugin module. Will be taken from the unload attribute of the plugin module as defined
in :attr:`attr_unload` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Unload method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_unload, default=lambda: True)
@property
def enable(self):
"""
Method for enabling the plugin module. Will be taken from the enable attribute of the plugin module as defined
in :attr:`attr_enable` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Enable method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_enable, default=lambda: True)
@property
def disable(self):
"""
Method for disabling the plugin module. Will be taken from the disable attribute of the plugin module as defined
in :attr:`attr_disable` if available, otherwise a no-operation lambda will be returned.
Returns:
callable: Disable method for the plugin module.
"""
return self._get_instance_attribute(self.__class__.attr_disable, default=lambda: True)
def _get_instance_attribute(self, attr, default=None, defaults=None):
if not hasattr(self.instance, attr):
if defaults is not None:
for value in defaults:
if value is not None:
return value
return default
return getattr(self.instance, attr)
class PluginManager(object):
"""
The :class:`PluginManager` is the central component for finding, loading and accessing plugins provided to the
system.
It is able to discover plugins both through possible file system locations and through customizable entry points.
"""
def __init__(self, plugin_folders, plugin_types, plugin_entry_points, logging_prefix=None,
plugin_disabled_list=None, plugin_restart_needing_hooks=None, plugin_obsolete_hooks=None,
plugin_validators=None):
self.logger = logging.getLogger(__name__)
if logging_prefix is None:
logging_prefix = ""
if plugin_disabled_list is None:
plugin_disabled_list = []
self.plugin_folders = plugin_folders
self.plugin_types = plugin_types
self.plugin_entry_points = plugin_entry_points
self.plugin_disabled_list = plugin_disabled_list
self.plugin_restart_needing_hooks = plugin_restart_needing_hooks
self.plugin_obsolete_hooks = plugin_obsolete_hooks
self.plugin_validators = plugin_validators
self.logging_prefix = logging_prefix
self.enabled_plugins = dict()
self.disabled_plugins = dict()
self.plugin_hooks = defaultdict(list)
self.plugin_implementations = dict()
self.plugin_implementations_by_type = defaultdict(list)
self.implementation_injects = dict()
self.implementation_inject_factories = []
self.implementation_pre_inits = []
self.implementation_post_inits = []
self.on_plugin_loaded = lambda *args, **kwargs: None
self.on_plugin_unloaded = lambda *args, **kwargs: None
self.on_plugin_enabled = lambda *args, **kwargs: None
self.on_plugin_disabled = lambda *args, **kwargs: None
self.on_plugin_implementations_initialized = lambda *args, **kwargs: None
self.registered_clients = []
self.marked_plugins = defaultdict(list)
self.reload_plugins(startup=True, initialize_implementations=False)
@property
def plugins(self):
plugins = dict(self.enabled_plugins)
plugins.update(self.disabled_plugins)
return plugins
def find_plugins(self, existing=None, ignore_uninstalled=True):
if existing is None:
existing = dict(self.plugins)
result = dict()
if self.plugin_folders:
result.update(self._find_plugins_from_folders(self.plugin_folders, existing, ignored_uninstalled=ignore_uninstalled))
if self.plugin_entry_points:
existing.update(result)
result.update(self._find_plugins_from_entry_points(self.plugin_entry_points, existing, ignore_uninstalled=ignore_uninstalled))
return result
def _find_plugins_from_folders(self, folders, existing, ignored_uninstalled=True):
result = dict()
for folder in folders:
readonly = False
if isinstance(folder, (list, tuple)):
if len(folder) == 2:
folder, readonly = folder
else:
continue
if not os.path.exists(folder):
self.logger.warn("Plugin folder {folder} could not be found, skipping it".format(folder=folder))
continue
entries = os.listdir(folder)
for entry in entries:
path = os.path.join(folder, entry)
if os.path.isdir(path) and os.path.isfile(os.path.join(path, "__init__.py")):
key = entry
elif os.path.isfile(path) and entry.endswith(".py"):
key = entry[:-3] # strip off the .py extension
else:
continue
if key in existing or key in result or (ignored_uninstalled and key in self.marked_plugins["uninstalled"]):
# plugin is already defined or marked as uninstalled, ignore it
continue
plugin = self._import_plugin_from_module(key, folder=folder)
if plugin:
plugin.origin = FolderOrigin("folder", folder)
if readonly:
plugin.bundled = True
plugin.enabled = False
result[key] = plugin
return result
def _find_plugins_from_entry_points(self, groups, existing, ignore_uninstalled=True):
result = dict()
# let's make sure we have a current working set
working_set = pkg_resources.WorkingSet()
if not isinstance(groups, (list, tuple)):
groups = [groups]
for group in groups:
for entry_point in working_set.iter_entry_points(group=group, name=None):
key = entry_point.name
module_name = entry_point.module_name
version = entry_point.dist.version
if key in existing or key in result or (ignore_uninstalled and key in self.marked_plugins["uninstalled"]):
# plugin is already defined or marked as uninstalled, ignore it
continue
kwargs = dict(module_name=module_name, version=version)
package_name = None
try:
module_pkginfo = InstalledEntryPoint(entry_point)
except:
self.logger.exception("Something went wrong while retrieving package info data for module %s" % module_name)
else:
kwargs.update(dict(
name=module_pkginfo.name,
summary=module_pkginfo.summary,
author=module_pkginfo.author,
url=module_pkginfo.home_page,
license=module_pkginfo.license
))
package_name = module_pkginfo.name
plugin = self._import_plugin_from_module(key, **kwargs)
if plugin:
plugin.origin = EntryPointOrigin("entry_point", group, module_name, package_name, version)
plugin.enabled = False
result[key] = plugin
return result
def _import_plugin_from_module(self, key, folder=None, module_name=None, name=None, version=None, summary=None, author=None, url=None, license=None):
# TODO error handling
try:
if folder:
module = imp.find_module(key, [folder])
elif module_name:
module = imp.find_module(module_name)
else:
return None
except:
self.logger.warn("Could not locate plugin {key}")
return None
plugin = self._import_plugin(key, *module, name=name, version=version, summary=summary, author=author, url=url, license=license)
if plugin is None:
return None
if plugin.check():
return plugin
else:
self.logger.warn("Plugin \"{plugin}\" did not pass check".format(plugin=str(plugin)))
return None
def _import_plugin(self, key, f, filename, description, name=None, version=None, summary=None, author=None, url=None, license=None):
try:
instance = imp.load_module(key, f, filename, description)
return PluginInfo(key, filename, instance, name=name, version=version, description=summary, author=author, url=url, license=license)
except:
self.logger.exception("Error loading plugin {key}".format(key=key))
return None
def _is_plugin_disabled(self, key):
return key in self.plugin_disabled_list or key.endswith('disabled')
def reload_plugins(self, startup=False, initialize_implementations=True, force_reload=None):
self.logger.info("Loading plugins from {folders} and installed plugin packages...".format(
folders=", ".join(map(lambda x: x[0] if isinstance(x, tuple) else str(x), self.plugin_folders))
))
if force_reload is None:
force_reload = []
plugins = self.find_plugins(existing=dict((k, v) for k, v in self.plugins.items() if not k in force_reload))
self.disabled_plugins.update(plugins)
for name, plugin in plugins.items():
try:
self.load_plugin(name, plugin, startup=startup, initialize_implementation=initialize_implementations)
if not self._is_plugin_disabled(name):
self.enable_plugin(name, plugin=plugin, initialize_implementation=initialize_implementations, startup=startup)
except PluginNeedsRestart:
pass
except PluginLifecycleException as e:
self.logger.info(str(e))
if len(self.enabled_plugins) <= 0:
self.logger.info("No plugins found")
else:
self.logger.info("Found {count} plugin(s) providing {implementations} mixin implementations, {hooks} hook handlers".format(
count=len(self.enabled_plugins) + len(self.disabled_plugins),
implementations=len(self.plugin_implementations),
hooks=sum(map(lambda x: len(x), self.plugin_hooks.values()))
))
def mark_plugin(self, name, uninstalled=None):
if not name in self.plugins:
self.logger.warn("Trying to mark an unknown plugin {name}".format(**locals()))
if uninstalled is not None:
if uninstalled and not name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].append(name)
elif not uninstalled and name in self.marked_plugins["uninstalled"]:
self.marked_plugins["uninstalled"].remove(name)
def load_plugin(self, name, plugin=None, startup=False, initialize_implementation=True):
if not name in self.plugins:
self.logger.warn("Trying to load an unknown plugin {name}".format(**locals()))
return
if plugin is None:
plugin = self.plugins[name]
try:
plugin.validate("before_load", additional_validators=self.plugin_validators)
plugin.load()
plugin.validate("after_load", additional_validators=self.plugin_validators)
self.on_plugin_loaded(name, plugin)
plugin.loaded = True
self.logger.debug("Loaded plugin {name}: {plugin}".format(**locals()))
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error loading plugin %s" % name)
def unload_plugin(self, name):
if not name in self.plugins:
self.logger.warn("Trying to unload unknown plugin {name}".format(**locals()))
return
plugin = self.plugins[name]
try:
if plugin.enabled:
self.disable_plugin(name, plugin=plugin)
plugin.unload()
self.on_plugin_unloaded(name, plugin)
if name in self.enabled_plugins:
del self.enabled_plugins[name]
if name in self.disabled_plugins:
del self.disabled_plugins[name]
plugin.loaded = False
self.logger.debug("Unloaded plugin {name}: {plugin}".format(**locals()))
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error unloading plugin {name}".format(**locals()))
# make sure the plugin is NOT in the list of enabled plugins but in the list of disabled plugins
if name in self.enabled_plugins:
del self.enabled_plugins[name]
if not name in self.disabled_plugins:
self.disabled_plugins[name] = plugin
def enable_plugin(self, name, plugin=None, initialize_implementation=True, startup=False):
if not name in self.disabled_plugins:
self.logger.warn("Tried to enable plugin {name}, however it is not disabled".format(**locals()))
return
if plugin is None:
plugin = self.disabled_plugins[name]
if not startup and self.is_restart_needing_plugin(plugin):
raise PluginNeedsRestart(name)
if self.has_obsolete_hooks(plugin):
raise PluginCantEnable(name, "Dependency on obsolete hooks detected, full functionality cannot be guaranteed")
try:
plugin.enable()
self._activate_plugin(name, plugin)
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error while enabling plugin {name}".format(**locals()))
return False
else:
if name in self.disabled_plugins:
del self.disabled_plugins[name]
self.enabled_plugins[name] = plugin
plugin.enabled = True
if plugin.implementation:
if initialize_implementation:
if not self.initialize_implementation_of_plugin(name, plugin):
return False
plugin.implementation.on_plugin_enabled()
self.on_plugin_enabled(name, plugin)
self.logger.debug("Enabled plugin {name}: {plugin}".format(**locals()))
return True
def disable_plugin(self, name, plugin=None):
if not name in self.enabled_plugins:
self.logger.warn("Tried to disable plugin {name}, however it is not enabled".format(**locals()))
return
if plugin is None:
plugin = self.enabled_plugins[name]
if self.is_restart_needing_plugin(plugin):
raise PluginNeedsRestart(name)
try:
plugin.disable()
self._deactivate_plugin(name, plugin)
except PluginLifecycleException as e:
raise e
except:
self.logger.exception("There was an error while disabling plugin {name}".format(**locals()))
return False
else:
if name in self.enabled_plugins:
del self.enabled_plugins[name]
self.disabled_plugins[name] = plugin
plugin.enabled = False
if plugin.implementation:
plugin.implementation.on_plugin_disabled()
self.on_plugin_disabled(name, plugin)
self.logger.debug("Disabled plugin {name}: {plugin}".format(**locals()))
return True
def _activate_plugin(self, name, plugin):
plugin.hotchangeable = self.is_restart_needing_plugin(plugin)
# evaluate registered hooks
for hook, callback in plugin.hooks.items():
self.plugin_hooks[hook].append((name, callback))
# evaluate registered implementation
if plugin.implementation:
for plugin_type in self.plugin_types:
if isinstance(plugin.implementation, plugin_type):
self.plugin_implementations_by_type[plugin_type].append((name, plugin.implementation))
self.plugin_implementations[name] = plugin.implementation
def _deactivate_plugin(self, name, plugin):
for hook, callback in plugin.hooks.items():
try:
self.plugin_hooks[hook].remove((name, callback))
except ValueError:
# that's ok, the plugin was just not registered for the hook
pass
if plugin.implementation is not None:
if name in self.plugin_implementations:
del self.plugin_implementations[name]
for plugin_type in self.plugin_types:
try:
self.plugin_implementations_by_type[plugin_type].remove((name, plugin.implementation))
except ValueError:
# that's ok, the plugin was just not registered for the type
pass
def is_restart_needing_plugin(self, plugin):
return self.has_restart_needing_implementation(plugin) or self.has_restart_needing_hooks(plugin)
def has_restart_needing_implementation(self, plugin):
if not plugin.implementation:
return False
return isinstance(plugin.implementation, RestartNeedingPlugin)
def has_restart_needing_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_restart_needing_hook(hook):
return True
return False
def has_obsolete_hooks(self, plugin):
if not plugin.hooks:
return False
hooks = plugin.hooks.keys()
for hook in hooks:
if self.is_obsolete_hook(hook):
return True
return False
def is_restart_needing_hook(self, hook):
if self.plugin_restart_needing_hooks is None:
return False
for h in self.plugin_restart_needing_hooks:
if hook.startswith(h):
return True
return False
def is_obsolete_hook(self, hook):
if self.plugin_obsolete_hooks is None:
return False
return hook in self.plugin_obsolete_hooks
def initialize_implementations(self, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
for name, plugin in self.enabled_plugins.items():
self.initialize_implementation_of_plugin(name, plugin,
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories,
additional_pre_inits=additional_pre_inits,
additional_post_inits=additional_post_inits)
self.logger.info("Initialized {count} plugin implementation(s)".format(count=len(self.plugin_implementations)))
def initialize_implementation_of_plugin(self, name, plugin, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
if plugin.implementation is None:
return
return self.initialize_implementation(name, plugin, plugin.implementation,
additional_injects=additional_injects,
additional_inject_factories=additional_inject_factories,
additional_pre_inits=additional_pre_inits,
additional_post_inits=additional_post_inits)
def initialize_implementation(self, name, plugin, implementation, additional_injects=None, additional_inject_factories=None, additional_pre_inits=None, additional_post_inits=None):
if additional_injects is None:
additional_injects = dict()
if additional_inject_factories is None:
additional_inject_factories = []
if additional_pre_inits is None:
additional_pre_inits = []
if additional_post_inits is None:
additional_post_inits = []
injects = self.implementation_injects
injects.update(additional_injects)
inject_factories = self.implementation_inject_factories
inject_factories += additional_inject_factories
pre_inits = self.implementation_pre_inits
pre_inits += additional_pre_inits
post_inits = self.implementation_post_inits
post_inits += additional_post_inits
try:
kwargs = dict(injects)
kwargs.update(dict(
identifier=name,
plugin_name=plugin.name,
plugin_version=plugin.version,
basefolder=os.path.realpath(plugin.location),
logger=logging.getLogger(self.logging_prefix + name),
))
# inject the additional_injects
for arg, value in kwargs.items():
setattr(implementation, "_" + arg, value)
# inject any injects produced in the additional_inject_factories
for factory in inject_factories:
try:
return_value = factory(name, implementation)
except:
self.logger.exception("Exception while executing injection factory %r" % factory)
else:
if return_value is not None:
if isinstance(return_value, dict):
for arg, value in return_value.items():
setattr(implementation, "_" + arg, value)
# execute any additional pre init methods
for pre_init in pre_inits:
pre_init(name, implementation)
implementation.initialize()
# execute any additional post init methods
for post_init in post_inits:
post_init(name, implementation)
except Exception as e:
self._deactivate_plugin(name, plugin)
plugin.enabled = False
if isinstance(e, PluginLifecycleException):
raise e
else:
self.logger.exception("Exception while initializing plugin {name}, disabling it".format(**locals()))
return False
else:
self.on_plugin_implementations_initialized(name, plugin)
self.logger.debug("Initialized plugin mixin implementation for plugin {name}".format(**locals()))
return True
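# Illustrative sketch of the injection factory contract handled above (names
# are hypothetical): a factory is a callable taking (name, implementation) and
# optionally returning a dict; every returned key/value pair is injected as an
# underscore-prefixed attribute before initialize() is called.
#
#   def settings_inject_factory(name, implementation):
#       # "load_settings_for" is a placeholder for application-specific code
#       return dict(settings=load_settings_for(name))
#
#   # passed via additional_inject_factories, this would make
#   # implementation._settings available inside implementation.initialize()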
def log_all_plugins(self, show_bundled=True, bundled_str=(" (bundled)", ""), show_location=True, location_str=" = {location}", show_enabled=True, enabled_str=(" ", "!")):
all_plugins = self.enabled_plugins.values() + self.disabled_plugins.values()
if len(all_plugins) <= 0:
self.logger.info("No plugins available")
else:
self.logger.info("{count} plugin(s) registered with the system:\n{plugins}".format(count=len(all_plugins), plugins="\n".join(
map(lambda x: "| " + x.long_str(show_bundled=show_bundled,
bundled_strs=bundled_str,
show_location=show_location,
location_str=location_str,
show_enabled=show_enabled,
enabled_strs=enabled_str),
sorted(self.plugins.values(), key=lambda x: str(x).lower()))
)))
def get_plugin(self, identifier, require_enabled=True):
"""
Retrieves the module of the plugin identified by ``identifier``. If the plugin is not registered, or if it is
disabled and ``require_enabled`` is True (the default), None will be returned.
Arguments:
identifier (str): The identifier of the plugin to retrieve.
require_enabled (boolean): Whether to only return the plugin if it is enabled (True, default) or also if it's
disabled.
Returns:
module: The requested plugin module or None
"""
plugin_info = self.get_plugin_info(identifier, require_enabled=require_enabled)
if plugin_info is not None:
return plugin_info.instance
return None
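# Usage sketch (hypothetical names: a manager instance "plugin_manager" and a
# registered plugin with identifier "some_plugin"):
#
#   module = plugin_manager.get_plugin("some_plugin")
#   if module is not None:
#       module.do_something()  # whatever the plugin module exposes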
def get_plugin_info(self, identifier, require_enabled=True):
"""
Retrieves the :class:`PluginInfo` instance identified by ``identifier``. If the plugin is not registered, or if
it is disabled and ``require_enabled`` is True (the default), None will be returned.
Arguments:
identifier (str): The identifier of the plugin to retrieve.
require_enabled (boolean): Whether to only return the plugin if it is enabled (True, default) or also if it's
disabled.
Returns:
~.PluginInfo: The requested :class:`PluginInfo` or None
"""
if identifier in self.enabled_plugins:
return self.enabled_plugins[identifier]
elif not require_enabled and identifier in self.disabled_plugins:
return self.disabled_plugins[identifier]
return None
def get_hooks(self, hook):
"""
Retrieves all registered handlers for the specified hook.
Arguments:
hook (str): The hook for which to retrieve the handlers.
Returns:
dict: A dict containing all registered handlers mapped by their plugin's identifier.
"""
if not hook in self.plugin_hooks:
return dict()
return {name: callback for name, callback in self.plugin_hooks[hook]}
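# Usage sketch (hypothetical hook name and handler signature):
#
#   for identifier, handler in plugin_manager.get_hooks("some.hook.name").items():
#       handler(some_argument)  # the expected arguments depend on the hook's contract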
def get_implementations(self, *types):
"""
Get all mixin implementations that implement *all* of the provided ``types``.
Arguments:
types (one or more type): The types a mixin implementation needs to implement in order to be returned.
Returns:
list: A list of all found implementations
"""
result = None
for t in types:
implementations = self.plugin_implementations_by_type[t]
if result is None:
result = set(implementations)
else:
result = result.intersection(implementations)
if result is None:
return []
return [impl[1] for impl in result]
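# Usage sketch (hypothetical mixin types): fetch every enabled implementation
# that implements all of the given mixins.
#
#   for impl in plugin_manager.get_implementations(SomeMixin, OtherMixin):
#       impl.some_mixin_method()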
def get_filtered_implementations(self, f, *types):
"""
Get all mixin implementations that implement *all* of the provided ``types`` and match the provided filter ``f``.
Arguments:
f (callable): A filter function returning True for implementations to return and False for those to exclude.
types (one or more type): The types a mixin implementation needs to implement in order to be returned.
Returns:
list: A list of all found and matching implementations.
"""
assert callable(f)
implementations = self.get_implementations(*types)
return filter(f, implementations)
def get_helpers(self, name, *helpers):
"""
Retrieves the named ``helpers`` for the plugin with identifier ``name``.
If the plugin is not available, returns None. Otherwise returns a :class:`dict` with the requested plugin
helper names mapped to the corresponding methods - if a helper could not be resolved, it will be missing from the dict.
Arguments:
name (str): Identifier of the plugin for which to look up the ``helpers``.
helpers (one or more str): Identifiers of the helpers of plugin ``name`` to return.
Returns:
dict: A dictionary of all resolved helpers, mapped by their identifiers, or None if the plugin was not
registered with the system.
"""
if not name in self.enabled_plugins:
return None
plugin = self.enabled_plugins[name]
all_helpers = plugin.helpers
if len(helpers):
return dict((k, v) for (k, v) in all_helpers.items() if k in helpers)
else:
return all_helpers
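# Usage sketch (hypothetical plugin identifier and helper names):
#
#   helpers = plugin_manager.get_helpers("some_plugin", "convert", "validate")
#   if helpers and "convert" in helpers:
#       result = helpers["convert"](data)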
def register_message_receiver(self, client):
"""
Registers a ``client`` for receiving plugin messages. The ``client`` needs to be a callable accepting two
input arguments, ``plugin`` (the sending plugin's identifier) and ``data`` (the message itself).
"""
if client is None:
return
self.registered_clients.append(client)
def unregister_message_receiver(self, client):
"""
Unregisters a ``client`` for receiving plugin messages.
"""
self.registered_clients.remove(client)
def send_plugin_message(self, plugin, data):
"""
Sends ``data`` in the name of ``plugin`` to all currently registered message receivers by invoking them
with the two arguments.
Arguments:
plugin (str): The sending plugin's identifier.
data (object): The message.
"""
for client in self.registered_clients:
try: client(plugin, data)
except: self.logger.exception("Exception while sending plugin data to client")
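# Usage sketch (hypothetical names): a message receiver is any callable taking
# (plugin, data); once registered it is invoked for every send_plugin_message()
# call.
#
#   def my_receiver(plugin, data):
#       print("message from %s: %r" % (plugin, data))
#
#   plugin_manager.register_message_receiver(my_receiver)
#   plugin_manager.send_plugin_message("some_plugin", dict(state="ready"))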
class InstalledEntryPoint(pkginfo.Installed):
def __init__(self, entry_point, metadata_version=None):
self.entry_point = entry_point
package = entry_point.module_name
pkginfo.Installed.__init__(self, package, metadata_version=metadata_version)
def read(self):
import sys
import glob
import warnings
opj = os.path.join
if self.package is not None:
package = self.package.__package__
if package is None:
package = self.package.__name__
project = pkg_resources.to_filename(pkg_resources.safe_name(self.entry_point.dist.project_name))
package_pattern = '%s*.egg-info' % package
project_pattern = '%s*.egg-info' % project
file = getattr(self.package, '__file__', None)
if file is not None:
candidates = []
def _add_candidate(where):
candidates.extend(glob.glob(where))
for entry in sys.path:
if file.startswith(entry):
_add_candidate(opj(entry, 'EGG-INFO')) # egg?
for pattern in (package_pattern, project_pattern): # dist-installed?
_add_candidate(opj(entry, pattern))
dir, name = os.path.split(self.package.__file__)
for pattern in (package_pattern, project_pattern):
_add_candidate(opj(dir, pattern))
_add_candidate(opj(dir, '..', pattern))
for candidate in candidates:
if os.path.isdir(candidate):
path = opj(candidate, 'PKG-INFO')
else:
path = candidate
if os.path.exists(path):
with open(path) as f:
return f.read()
warnings.warn('No PKG-INFO found for package: %s' % self.package_name)
class Plugin(object):
"""
The parent class of all plugin implementations.
.. attribute:: _identifier
The identifier of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _plugin_name
The name of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _plugin_version
The version of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _basefolder
The base folder of the plugin. Injected by the plugin core system upon initialization of the implementation.
.. attribute:: _logger
The logger instance to use, with the logging name set to the :attr:`PluginManager.logging_prefix` of the
:class:`PluginManager` concatenated with :attr:`_identifier`. Injected by the plugin core system upon
initialization of the implementation.
"""
def initialize(self):
"""
Called by the plugin core after performing all injections. Override this to initialize your implementation.
"""
pass
def on_plugin_enabled(self):
pass
def on_plugin_disabled(self):
pass
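# Illustrative sketch (hypothetical plugin): a minimal implementation subclasses
# Plugin and relies on the attributes injected by the plugin core as documented
# in the class docstring above.
#
#   class ExamplePlugin(Plugin):
#       def initialize(self):
#           # _identifier, _logger etc. have already been injected here
#           self._logger.info("Plugin %s initialized" % self._identifier)
#
#       def on_plugin_enabled(self):
#           self._logger.info("Plugin %s enabled" % self._identifier)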
class RestartNeedingPlugin(Plugin):
pass
class PluginNeedsRestart(Exception):
def __init__(self, name):
Exception.__init__(self)
self.name = name
self.message = "Plugin {name} cannot be enabled or disabled after system startup".format(**locals())
class PluginLifecycleException(Exception):
def __init__(self, name, reason, message):
Exception.__init__(self)
self.name = name
self.reason = reason
self.message = message.format(**locals())
def __str__(self):
return self.message
class PluginCantInitialize(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be initialized: {reason}")
class PluginCantEnable(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be enabled: {reason}")
class PluginCantDisable(PluginLifecycleException):
def __init__(self, name, reason):
PluginLifecycleException.__init__(self, name, reason, "Plugin {name} cannot be disabled: {reason}")
| agpl-3.0 |
mungerd/plastex | plasTeX/Base/LaTeX/Index.py | 5 | 13222 | #!/usr/bin/env python
"""
C.11.5 Index and Glossary (p211)
"""
import string, os
from plasTeX.Tokenizer import Token, EscapeSequence
from plasTeX import Command, Environment, IgnoreCommand, encoding
from plasTeX.Logging import getLogger
from Sectioning import SectionUtils
try:
from pyuca import Collator
collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key
except ImportError:
collator = lambda x: x.lower()
class hyperpage(IgnoreCommand):
args = 'page:nox'
class hyperindexformat(IgnoreCommand):
args = 'fmt:nox page:nox'
class IndexUtils(object):
""" Helper functions for generating indexes """
linkType = 'index'
level = Command.CHAPTER_LEVEL
class Index(Command):
"""
Utility class used to surface the index entries to the renderer
"""
def __init__(self, *args, **kwargs):
Command.__init__(self, *args, **kwargs)
self.pages = []
self.key = []
self.sortkey = ''
@property
def totallen(self):
""" Return the total number of entries generated by this entry """
total = 1
for item in self:
total += item.totallen
return total
def __repr__(self):
return '%s%s --> %s' % (''.join([x.source for x in self.key]),
', '.join([str(x) for x in self.pages]),
Command.__repr__(self))
class IndexGroup(list):
title = None
def invoke(self, tex):
if isinstance(self, Environment):
Environment.invoke(self, tex)
else:
Command.invoke(self, tex)
self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex)
@property
def groups(self):
"""
Group index entries into batches according to the first letter
"""
batches = []
current = ''
for item in self:
try:
label = title = item.sortkey[0].upper()
if title in encoding.stringletters():
pass
elif title == '_':
title = '_ (Underscore)'
else:
label = title = 'Symbols'
except IndexError:
label = title = 'Symbols'
if current != title:
newgroup = self.IndexGroup()
newgroup.title = title
newgroup.id = label
batches.append(newgroup)
current = title
batches[-1].append(item)
for item in batches:
item[:] = self.splitColumns(item,
self.ownerDocument.config['document']['index-columns'])
return batches
def splitColumns(self, items, cols):
"""
Divide the index entries into the specified number of columns
Required Arguments:
items -- list of column entries
cols -- number of columns to create
Returns:
list of length `cols' containing groups of column entries
"""
entries = [(0,0)]
# Find the total number of entries
grandtotal = 0
for item in items:
entries.append((item.totallen, item))
grandtotal += entries[-1][0]
entries.pop(0)
entries.reverse()
# Get total number of entries per column
coltotal = int(grandtotal / cols)
# Group entries into columns
current = 0
output = [[]]
for num, item in entries:
current += num
if len(output) >= cols:
output[-1].append(item)
elif current > coltotal:
output.append([item])
current = num
elif current == coltotal:
output[-1].append(item)
output.append([])
current = 0
else:
output[-1].append(item)
output.reverse()
for item in output:
item.reverse()
# Get rid of empty columns
output = [x for x in output if x]
# Pad to the correct number of columns
for i in range(cols-len(output)):
output.append([])
return output
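# Worked example for splitColumns (hypothetical numbers): with four entries
# whose totallen values are 2, 3, 2 and 3 and cols=2, grandtotal is 10 and
# coltotal is 5, so each of the two resulting columns ends up holding entries
# whose totals sum to 5.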
def digest(self, tokens):
""" Sort and group index entries """
if isinstance(self, Environment):
Environment.digest(self, tokens)
if self.macroMode == self.MODE_END:
return
# Throw it all away, we don't need it. We'll be generating
# our own index entries below.
while self.childNodes:
self.pop()
else:
Command.digest(self, tokens)
doc = self.ownerDocument
current = self
entries = sorted(self.ownerDocument.userdata.get('index', []))
prev = IndexEntry([], None)
for item in entries:
# See how many levels we need to add/subtract between this one
# and the previous
common = 0
for prevkey, itemkey in zip(zip(prev.sortkey, prev.key),
zip(item.sortkey, item.key)):
if prevkey == itemkey:
common += 1
continue
break
# print
# print item
# print (prev.key, prev.sortkey), (item.key, item.sortkey), common
# Pop out to the common level
i = common
while i < len(prev.key):
# print 'POP'
current = current.parentNode
i += 1
# Add the appropriate number of levels
i = common
while i < len(item.key):
# print 'ADD', item.sortkey[i]
newidx = self.Index()
newidx.key = item.key[i]
newidx.sortkey = item.sortkey[i]
newidx.parentNode = current
current.append(newidx)
current = newidx
i += 1
# Add the current page and format it
current.pages.append(IndexDestination(item.type, item.node))
if item.format is not None:
text = doc.createTextNode(str(len(current.pages)))
ipn = item.format.getElementsByTagName('index-page-number')
if ipn:
ipn = ipn[0]
ipn.parentNode.replaceChild(text, ipn)
item.node.append(item.format)
else:
text = doc.createTextNode(str(len(current.pages)))
item.node.append(text)
prev = item
class IndexDestination(object):
def __init__(self, type, node):
self._cr_type = type
self._cr_node = node
@property
def see(self):
return self._cr_type == IndexEntry.TYPE_SEE
@property
def seealso(self):
return self._cr_type == IndexEntry.TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __getattribute__(self, name):
if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']:
return object.__getattribute__(self, name)
if self._cr_type and name in ['url']:
return None
return getattr(self._cr_node, name)
def __unicode__(self):
return unicode(self._cr_node)
class theindex(IndexUtils, Environment, SectionUtils):
blockType = True
level = Environment.CHAPTER_LEVEL
counter = 'chapter'
class printindex(IndexUtils, Command, SectionUtils):
blockType = True
level = Command.CHAPTER_LEVEL
counter = 'chapter'
class makeindex(Command):
pass
class makeglossary(Command):
pass
class glossary(Command):
args = 'entry:nox'
class index(Command):
args = 'entry:nox'
@property
def textContent(self):
return ''
def invoke(self, tex):
result = Command.invoke(self, tex)
sortkey, key, format = [], [], []
entry = iter(self.attributes['entry'])
current = []
alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE]
# Parse the index tokens
for tok in entry:
if tok.catcode in alphanumeric:
# Escape character
if tok == '"':
for tok in entry:
current.append(tok)
break
# Entry separator
elif tok == '!':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = []
# Sort key separator
elif tok == '@':
sortkey.append(current)
current = []
# Format separator
elif tok == '|':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = format
else:
current.append(tok)
continue
# Everything else
current.append(tok)
# Make sure to get the stuff at the end
if not format:
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
# Convert the sort keys to strings
for i, item in enumerate(sortkey):
sortkey[i] = tex.expandTokens(item).textContent
# Expand the key tokens
for i, item in enumerate(key):
key[i] = tex.expandTokens(item)
# Get the format element
type = IndexEntry.TYPE_NORMAL
if not format:
format = None
else:
macro = []
while format and format[0].catcode == Token.CC_LETTER:
macro.append(format.pop(0))
if macro:
macro = ''.join(macro)
format.insert(0, EscapeSequence(macro))
if macro == 'see':
type = IndexEntry.TYPE_SEE
elif macro == 'seealso':
type = IndexEntry.TYPE_SEEALSO
format.append(EscapeSequence('index-page-number'))
format = tex.expandTokens(format)
# Store the index information in the document
userdata = self.ownerDocument.userdata
if 'index' not in userdata:
userdata['index'] = []
userdata['index'].append(IndexEntry(key, self, sortkey, format, type))
return result
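# Illustrative examples of the \index entry syntax handled above (standard
# LaTeX/makeidx conventions, shown here only as comments):
#   \index{alpha}                 plain entry "alpha"
#   \index{alpha!beta}            sub-entry "beta" under "alpha" ('!' separator)
#   \index{Sortkey@\textbf{Key}}  sort under "Sortkey", display the formatted key ('@')
#   \index{alpha|see{beta}}       cross-reference via the '|' format separator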
class IndexEntry(object):
"""
Utility class used to assist in the sorting of index entries
"""
TYPE_NORMAL = 0
TYPE_SEE = 1
TYPE_SEEALSO = 2
def __init__(self, key, node, sortkey=None, format=None, type=0):
"""
Required Arguments:
key -- a list of keys for the index entry
node -- the node of the document that the index entry is
associated with
sortkey -- a list of sort keys, one per key, to be used for
sorting instead of the key values
format -- formatting that should be used to format the
destination of the index entry
type -- the type of entry that this is: TYPE_NORMAL, TYPE_SEE,
or TYPE_SEEALSO
"""
self.key = key
if not sortkey:
self.sortkey = key
else:
self.sortkey = []
for i, sk in enumerate(sortkey):
if sk is None:
self.sortkey.append(key[i].textContent)
else:
self.sortkey.append(sk)
self.format = format
self.node = node
self.type = type
@property
def see(self):
return self.type == type(self).TYPE_SEE
@property
def seealso(self):
return self.type == type(self).TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __cmp__(self, other):
result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in self.key],
self.key),
zip([collator(x) for x in other.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in other.key],
other.key))
if result == 0 and len(self.key) != len(other.key):
return cmp(len(self.key), len(other.key))
return result
def __repr__(self):
if self.format is None:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key])])
else:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key]),
' '.join([x.source for x in self.format])])
def __str__(self):
return repr(self)
class IndexPageNumber(Command):
macroName = 'index-page-number'
| mit |
nitzmahone/ansible | test/units/modules/network/eos/test_eos_banner.py | 55 | 3617 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.eos import eos_banner
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosBannerModule(TestEosModule):
module = eos_banner
def setUp(self):
super(TestEosBannerModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.eos.eos_banner.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_banner.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestEosBannerModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
if transport == 'cli':
self.run_commands.return_value = [load_fixture('eos_banner_show_banner.txt').strip()]
else:
self.run_commands.return_value = [{'loginBanner': load_fixture('eos_banner_show_banner.txt').strip()}]
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_banner_create_with_cli_transport(self):
set_module_args(dict(banner='login', text='test\nbanner\nstring',
transport='cli'))
commands = ['banner login', 'test', 'banner', 'string', 'EOF']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_remove_with_cli_transport(self):
set_module_args(dict(banner='login', state='absent', transport='cli'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands)
def test_eos_banner_create_with_eapi_transport(self):
set_module_args(dict(banner='login', text='test\nbanner\nstring',
transport='eapi'))
commands = ['banner login']
inputs = ['test\nbanner\nstring']
self.execute_module(changed=True, commands=commands, inputs=inputs, transport='eapi')
def test_eos_banner_remove_with_eapi_transport(self):
set_module_args(dict(banner='login', state='absent', transport='eapi'))
commands = ['no banner login']
self.execute_module(changed=True, commands=commands, transport='eapi')
def test_eos_banner_nochange_with_cli_transport(self):
banner_text = load_fixture('eos_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text, transport='cli'))
self.execute_module()
def test_eos_banner_nochange_with_eapi_transport(self):
banner_text = load_fixture('eos_banner_show_banner.txt').strip()
set_module_args(dict(banner='login', text=banner_text, transport='eapi'))
self.execute_module(transport='eapi')
| gpl-3.0 |
jacobq/csci5221-viro-project | tests/unit/lib/mock_socket_test.py | 45 | 2309 | #!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os.path
from copy import copy
sys.path.append(os.path.dirname(__file__) + "/../../..")
from pox.lib.mock_socket import MockSocket
class MockSocketTest(unittest.TestCase):
def setUp(self):
pass
def test_simple_send(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertEquals(b.recv(), "Hallo")
b.send("Servus")
self.assertEquals(a.recv(), "Servus")
def test_ready_to_recv(self):
(a, b) = MockSocket.pair()
a.send("Hallo")
self.assertFalse(a.ready_to_recv())
self.assertTrue(b.ready_to_recv())
self.assertEquals(b.recv(), "Hallo")
self.assertFalse(b.ready_to_recv())
self.assertFalse(a.ready_to_recv())
b.send("Servus")
self.assertTrue(a.ready_to_recv())
self.assertEquals(a.recv(), "Servus")
self.assertFalse(a.ready_to_recv())
def test_on_ready_to_recv(self):
self.seen_size = -1
self.called = 0
def ready(socket, size):
self.called += 1
self.seen_size = size
(a, b) = MockSocket.pair()
b.set_on_ready_to_recv(ready)
self.assertEquals(self.called, 0)
a.send("Hallo")
self.assertEquals(self.called, 1)
self.assertEquals(self.seen_size, 5)
# check that it doesn't get called on the other sockets data
b.send("Huhu")
self.assertEquals(self.called, 1)
def test_empty_recv(self):
""" test_empty_recv: Check that empty reads on socket return ""
Note that this is actually non-sockety behavior and should probably be changed. This
test documents it as intended for now, though
"""
(a, b) = MockSocket.pair()
self.assertEquals(a.recv(), "")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
mir-ror/linux-yocto-dev | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the function names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found among its parents, then create it as a new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
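# Illustrative example (hypothetical trace line): given input such as
#   bash-2794 [000] 273.278581: mutex_unlock <-tracing_stop
# parseLine() returns ("273.278581", "mutex_unlock", "tracing_stop"),
# i.e. (calltime, callee, caller).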
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
cmisenas/artos | PyARTOS/config.py | 2 | 7158 | """Reads and writes the configuration file of PyARTOS.
This module provides access to the configuration given in the file 'pyartos.ini',
which is searched for in the current working directory. To access the configuration
options, the 'config' object in this module's dictionary can be used, which is an
instance of the private _Config class.
"""
try:
# Python 3
from configparser import ConfigParser as SafeConfigParser
except:
# Python 2
from ConfigParser import SafeConfigParser
import os.path
class _Config(SafeConfigParser):
"""Class to access configuration options of PyARTOS.
Rather than instantiating this class, use the 'config' instance available in the dictionary
of the module.
"""
def __init__(self, iniFile):
"""Initializes a new configuration instance and sets appropriate default values."""
SafeConfigParser.__init__(self, allow_no_value = True)
self.iniFileName = iniFile
self.read(iniFile)
self.defaults = { }
self.applyDefaults({
'libartos' : {
'model_dir' : _Config.findModelDir(),
'library_path' : None,
'debug' : 0,
},
'ImageNet' : {
'repository_directory' : None
},
'GUI' : {
'max_video_width' : 640,
'max_video_height' : 480
}
});
def applyDefaults(self, defaultDict):
"""Applies default values from a dictionary.
defaultDict - Dictionary whose keys are section names and whose values are dictionaries
with the default configuration options for that section.
"""
for section in defaultDict:
if not section in self.defaults:
self.defaults[section] = { }
if not self.has_section(section):
self.add_section(section)
for option in defaultDict[section]:
self.defaults[section][option] = defaultDict[section][option]
if not self.has_option(section, option):
self.set(section, option, None)
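# Illustrative example (hypothetical section and option names):
#
#   config.applyDefaults({
#       'Camera': {'device': '/dev/video0', 'fps': 15},
#   })
#   # missing sections/options are created and later fall back to these values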
def get(self, section, option, useDefaults = True):
"""Get an option value for the named section.
If useDefaults is set to true, this function falls back to default values
if the given option hasn't been specified in the configuration file or is empty.
"""
try:
value = SafeConfigParser.get(self, section, option)
except:
value = None
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = self.defaults[section][option]
return value
def getInt(self, section, option, useDefaults = True, min = None, max = None):
"""Get an option value for the named section as integer.
If useDefaults is set to true, this function falls back to default values if the
given option hasn't been specified in the configuration file or isn't an integral value.
The range of allowable values can be specified using the min and max parameters. The value
from the configuration file will be clipped to that range.
"""
try:
value = int(SafeConfigParser.get(self, section, option))
except:
value = None
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = self.defaults[section][option]
if not value is None:
if (not min is None) and (value < min):
value = min
elif (not max is None) and (value > max):
value = max
return value
def getBool(self, section, option, useDefaults = True):
"""Get an option value for the named section as boolean value.
If useDefaults is set to true, this function falls back to default values if the
given option hasn't been specified in the configuration file or can't be interpreted as a boolean value.
Empty strings, the strings 'no', 'off' and 'false' as well as the number 0 will be interpreted as False.
Every number different from 0 as well as the strings 'yes', 'on' and 'true' will be interpreted as True.
"""
def toBool(str):
try:
intval = int(str)
return (intval != 0)
except:
pass
try:
if str.lower() in ('', 'no', 'off', 'false'):
return False
elif str.lower() in ('yes', 'on', 'true'):
return True
except:
pass
return None
value = toBool(SafeConfigParser.get(self, section, option))
if useDefaults and ((value is None) or (value == '')) \
and (section in self.defaults) and (option in self.defaults[section]):
value = toBool(self.defaults[section][option])
return value
def is_set(self, section, option):
"""Determines if a given option has been set in the configuration file (regardless of default values)."""
try:
value = SafeConfigParser.get(self, section, option)
except:
value = None
return (not value is None) and (value != '')
def differentFromDefault(self, section, option):
"""Determines if a given configuration option differs from it's default value."""
if (not section in self.defaults) or (not option in self.defaults[section]):
return True
else:
return (self.get(section, option) != self.defaults[section][option])
def save(self):
"""Writes the configuration options to the file they were read from."""
with open(self.iniFileName, 'w') as file:
self.write(file)
@staticmethod
def findModelDir():
"""Searches for a potential model directory.
Searches for a directory named 'models' in the current working directory, one level above
the current working directory, the executed script's directory and the package's directory.
The first match is returned or an empty string if no directory has been found.
"""
basedir = os.path.dirname(os.path.abspath(__file__))
tests = ['models', os.path.join('..','models'), os.path.join(basedir,'..','models'), os.path.join(basedir,'models')]
for t in tests:
if (os.path.isdir(t)):
return os.path.realpath(t)
else:
return ''
config = _Config('pyartos.ini') | gpl-3.0 |
JorgeCoock/django | django/contrib/gis/maps/google/__init__.py | 287 | 2771 | """
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render_to_response('template.html', {'google' : GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import (
GEvent, GIcon, GMarker, GPolygon, GPolyline,
)
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
| bsd-3-clause |
urandu/mfl_api | chul/filters.py | 1 | 2760 | import django_filters
from .models import (
CommunityHealthUnit,
CommunityHealthWorker,
CommunityHealthWorkerContact,
Status,
CommunityHealthUnitContact,
Approver,
CommunityHealthUnitApproval,
CommunityHealthWorkerApproval,
ApprovalStatus
)
from common.filters.filter_shared import CommonFieldsFilterset
class ApproverFilter(CommonFieldsFilterset):
class Meta(object):
model = Approver
class CommunityHealthUnitApprovalFilter(CommonFieldsFilterset):
class Meta(object):
model = CommunityHealthUnitApproval
class CommunityHealthWorkerApprovalFilter(CommonFieldsFilterset):
class Meta(object):
model = CommunityHealthWorkerApproval
class ApprovalStatusFilter(CommonFieldsFilterset):
class Meta(object):
model = ApprovalStatus
class StatusFilter(CommonFieldsFilterset):
name = django_filters.CharFilter(lookup_type='icontains')
description = django_filters.CharFilter(lookup_type='icontains')
class Meta(object):
model = Status
class CommunityHealthUnitContactFilter(CommonFieldsFilterset):
health_unit = django_filters.AllValuesFilter(lookup_type='exact')
contact = django_filters.AllValuesFilter(lookup_type='exact')
class Meta(object):
model = CommunityHealthUnitContact
class CommunityHealthUnitFilter(CommonFieldsFilterset):
name = django_filters.CharFilter(lookup_type='icontains')
facility = django_filters.AllValuesFilter(lookup_type='exact')
ward = django_filters.CharFilter(name='community__ward')
constituency = django_filters.CharFilter(
name='community_ward__constituency')
county = django_filters.CharFilter(
name='community__ward__constituency__county')
class Meta(object):
model = CommunityHealthUnit
class CommunityHealthWorkerFilter(CommonFieldsFilterset):
first_name = django_filters.CharFilter(lookup_type='icontains')
last_name = django_filters.CharFilter(lookup_type='icontains')
username = django_filters.CharFilter(lookup_type='icontains')
id_number = django_filters.CharFilter(lookup_type='exact')
ward = django_filters.CharFilter(name='health_unit__community__ward')
constituency = django_filters.CharFilter(
name='health_unit__community_ward__constituency')
county = django_filters.CharFilter(
name='health_unit__community__ward__constituency__county')
class Meta(object):
model = CommunityHealthWorker
class CommunityHealthWorkerContactFilter(CommonFieldsFilterset):
health_worker = django_filters.AllValuesFilter(lookup_type='icontains')
contact = django_filters.AllValuesFilter(lookup_type='icontains')
class Meta(object):
model = CommunityHealthWorkerContact
| mit |
Pretio/boto | boto/gs/lifecycle.py | 157 | 9086 | # Copyright 2013 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import InvalidLifecycleConfigError
# Relevant tags for the lifecycle configuration XML document.
LIFECYCLE_CONFIG = 'LifecycleConfiguration'
RULE = 'Rule'
ACTION = 'Action'
DELETE = 'Delete'
CONDITION = 'Condition'
AGE = 'Age'
CREATED_BEFORE = 'CreatedBefore'
NUM_NEWER_VERSIONS = 'NumberOfNewerVersions'
IS_LIVE = 'IsLive'
# List of all action elements.
LEGAL_ACTIONS = [DELETE]
# List of all action parameter elements.
LEGAL_ACTION_PARAMS = []
# List of all condition elements.
LEGAL_CONDITIONS = [AGE, CREATED_BEFORE, NUM_NEWER_VERSIONS, IS_LIVE]
# Dictionary mapping actions to supported action parameters for each action.
LEGAL_ACTION_ACTION_PARAMS = {
DELETE: [],
}
class Rule(object):
"""
A lifecycle rule for a bucket.
:ivar action: Action to be taken.
:ivar action_params: A dictionary of action specific parameters. Each item
in the dictionary represents the name and value of an action parameter.
:ivar conditions: A dictionary of conditions that specify when the action
should be taken. Each item in the dictionary represents the name and value
of a condition.
"""
def __init__(self, action=None, action_params=None, conditions=None):
self.action = action
self.action_params = action_params or {}
self.conditions = conditions or {}
# Name of the current enclosing tag (used to validate the schema).
self.current_tag = RULE
def validateStartTag(self, tag, parent):
"""Verify parent of the start tag."""
if self.current_tag != parent:
raise InvalidLifecycleConfigError(
'Invalid tag %s found inside %s tag' % (tag, self.current_tag))
def validateEndTag(self, tag):
"""Verify end tag against the start tag."""
if tag != self.current_tag:
raise InvalidLifecycleConfigError(
'Mismatched start and end tags (%s/%s)' %
(self.current_tag, tag))
def startElement(self, name, attrs, connection):
if name == ACTION:
self.validateStartTag(name, RULE)
elif name in LEGAL_ACTIONS:
self.validateStartTag(name, ACTION)
# Verify there is only one action tag in the rule.
if self.action is not None:
raise InvalidLifecycleConfigError(
'Only one action tag is allowed in each rule')
self.action = name
elif name in LEGAL_ACTION_PARAMS:
# Make sure this tag is found in an action tag.
if self.current_tag not in LEGAL_ACTIONS:
raise InvalidLifecycleConfigError(
'Tag %s found outside of action' % name)
# Make sure this tag is allowed for the current action tag.
if name not in LEGAL_ACTION_ACTION_PARAMS[self.action]:
raise InvalidLifecycleConfigError(
'Tag %s not allowed in action %s' % (name, self.action))
elif name == CONDITION:
self.validateStartTag(name, RULE)
elif name in LEGAL_CONDITIONS:
self.validateStartTag(name, CONDITION)
# Verify there are no duplicate conditions.
if name in self.conditions:
raise InvalidLifecycleConfigError(
'Found duplicate conditions %s' % name)
else:
raise InvalidLifecycleConfigError('Unsupported tag ' + name)
self.current_tag = name
def endElement(self, name, value, connection):
self.validateEndTag(name)
if name == RULE:
# We have to validate the rule after it is fully populated because
# the action and condition elements could be in any order.
self.validate()
elif name == ACTION:
self.current_tag = RULE
elif name in LEGAL_ACTIONS:
self.current_tag = ACTION
elif name in LEGAL_ACTION_PARAMS:
self.current_tag = self.action
# Add the action parameter name and value to the dictionary.
self.action_params[name] = value.strip()
elif name == CONDITION:
self.current_tag = RULE
elif name in LEGAL_CONDITIONS:
self.current_tag = CONDITION
# Add the condition name and value to the dictionary.
self.conditions[name] = value.strip()
else:
raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
def validate(self):
"""Validate the rule."""
if not self.action:
raise InvalidLifecycleConfigError(
'No action was specified in the rule')
if not self.conditions:
raise InvalidLifecycleConfigError(
'No condition was specified for action %s' % self.action)
def to_xml(self):
"""Convert the rule into XML string representation."""
s = '<' + RULE + '>'
s += '<' + ACTION + '>'
if self.action_params:
s += '<' + self.action + '>'
for param in LEGAL_ACTION_PARAMS:
if param in self.action_params:
s += ('<' + param + '>' + self.action_params[param] + '</'
+ param + '>')
s += '</' + self.action + '>'
else:
s += '<' + self.action + '/>'
s += '</' + ACTION + '>'
s += '<' + CONDITION + '>'
for condition in LEGAL_CONDITIONS:
if condition in self.conditions:
s += ('<' + condition + '>' + self.conditions[condition] + '</'
+ condition + '>')
s += '</' + CONDITION + '>'
s += '</' + RULE + '>'
return s
class LifecycleConfig(list):
"""
A container of rules associated with a lifecycle configuration.
"""
def __init__(self):
# Track if root tag has been seen.
self.has_root_tag = False
def startElement(self, name, attrs, connection):
if name == LIFECYCLE_CONFIG:
if self.has_root_tag:
raise InvalidLifecycleConfigError(
'Only one root tag is allowed in the XML')
self.has_root_tag = True
elif name == RULE:
if not self.has_root_tag:
raise InvalidLifecycleConfigError('Invalid root tag ' + name)
rule = Rule()
self.append(rule)
return rule
else:
raise InvalidLifecycleConfigError('Unsupported tag ' + name)
def endElement(self, name, value, connection):
if name == LIFECYCLE_CONFIG:
pass
else:
raise InvalidLifecycleConfigError('Unsupported end tag ' + name)
def to_xml(self):
"""Convert LifecycleConfig object into XML string representation."""
s = '<?xml version="1.0" encoding="UTF-8"?>'
s += '<' + LIFECYCLE_CONFIG + '>'
for rule in self:
s += rule.to_xml()
s += '</' + LIFECYCLE_CONFIG + '>'
return s
def add_rule(self, action, action_params, conditions):
"""
Add a rule to this Lifecycle configuration. This only adds the rule to
the local copy. To install the new rule(s) on the bucket, you need to
pass this Lifecycle config object to the configure_lifecycle method of
the Bucket object.
:type action: str
:param action: Action to be taken.
:type action_params: dict
:param action_params: A dictionary of action specific parameters. Each
item in the dictionary represents the name and value of an action
parameter.
:type conditions: dict
:param conditions: A dictionary of conditions that specify when the
action should be taken. Each item in the dictionary represents the name
and value of a condition.
"""
rule = Rule(action, action_params, conditions)
self.append(rule)
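# Usage sketch (illustrative only; values are hypothetical):
#
#   config = LifecycleConfig()
#   config.add_rule('Delete', {}, {'Age': '365'})
#   # passing this config to a bucket's configure_lifecycle method, as described
#   # above, would install a rule deleting objects older than 365 days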
| mit |
HybridF5/jacket | jacket/db/sqlalchemy/migrate_repo/versions/036_compute_251_add_numa_topology_to_comput_nodes.py | 81 | 1166 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
numa_topology = Column('numa_topology', Text, nullable=True)
shadow_numa_topology = Column('numa_topology', Text, nullable=True)
compute_nodes.create_column(numa_topology)
shadow_compute_nodes.create_column(shadow_numa_topology)
| apache-2.0 |
ehirt/odoo | addons/resource/__init__.py | 448 | 1086 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import resource
import faces
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chen0031/nupic | nupic/bindings/__init__.py | 33 | 1033 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
__import__("pkg_resources").declare_namespace(__name__)
| agpl-3.0 |
crowdhackathon-transport/optimizers | crowdstance-api/venv/lib/python2.7/site-packages/setuptools/command/bdist_egg.py | 286 | 18718 | """setuptools.command.bdist_egg
Build .egg distributions"""
# This module should be kept compatible with Python 2.3
import sys, os, marshal
from setuptools import Command
from distutils.dir_util import remove_tree, mkpath
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
from distutils import log
from distutils.errors import DistutilsSetupError
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from types import CodeType
from setuptools.compat import basestring, next
from setuptools.extension import Library
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
def write_stub(resource, pyfile):
f = open(pyfile,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __loader__, __file__",
" import sys, pkg_resources, imp",
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% resource,
" __loader__ = None; del __bootstrap__, __loader__",
" imp.load_dynamic(__name__,__file__)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
# stub __init__.py for packages distributed without one
NS_PKG_STUB = '__import__("pkg_resources").declare_namespace(__name__)'
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options (self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename+'.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files,[]
for item in old:
if isinstance(item,tuple) and len(item)==2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized==site_packages or normalized.startswith(
site_packages+os.sep
):
item = realpath[len(site_packages)+1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s" % self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self,cmdname,**kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname,self.bdist_dir)
kw.setdefault('skip_build',self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s" % self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root; instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p,ext_name) in enumerate(ext_outputs):
filename,ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename)+'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s" % ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep,'/')
to_compile.extend(self.make_init_files())
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root,'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s" % script_dir)
self.call_command('install_scripts',install_dir=script_dir,no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s" % native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s" % native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root,'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info,'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution,'dist_files',[]).append(
('bdist_egg',get_python_version(),self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base,dirs,files in walk_egg(self.bdist_dir):
for name in files:
if name.endswith('.py'):
path = os.path.join(base,name)
log.debug("Deleting %s", path)
os.unlink(path)
def zip_safe(self):
safe = getattr(self.distribution,'zip_safe',None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def make_init_files(self):
"""Create missing package __init__ files"""
init_files = []
for base,dirs,files in walk_egg(self.bdist_dir):
if base==self.bdist_dir:
# don't put an __init__ in the root
continue
for name in files:
if name.endswith('.py'):
if '__init__.py' not in files:
pkg = base[len(self.bdist_dir)+1:].replace(os.sep,'.')
if self.distribution.has_contents_for(pkg):
log.warn("Creating missing __init__.py for %s",pkg)
filename = os.path.join(base,'__init__.py')
if not self.dry_run:
f = open(filename,'w'); f.write(NS_PKG_STUB)
f.close()
init_files.append(filename)
break
else:
# not a package, don't traverse to subdirectories
dirs[:] = []
return init_files
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation',{}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = sys.version[:3]
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info,'')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir:''}
for base, dirs, files in os.walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base]+filename)
for filename in dirs:
paths[os.path.join(base,filename)] = paths[base]+filename+'/'
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext,Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir,filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = os.walk(egg_dir)
base,dirs,files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base,dirs,files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag,fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir,'EGG-INFO',fn)):
return flag
if not can_scan(): return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag,fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe)==flag:
f=open(fn,'wt'); f.write('\n'); f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base,name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir)+1:].replace(os.sep,'.')
module = pkg+(pkg and '.' or '')+os.path.splitext(name)[0]
if sys.version_info < (3, 3):
skip = 8 # skip magic & date
else:
skip = 12 # skip magic & date & file size
f = open(filename,'rb'); f.read(skip)
code = marshal.load(f); f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
if '__name__' in symbols and '__main__' in symbols and '.' not in module:
if sys.version[:3]=="2.4": # -m works w/zipfiles in 2.5
log.warn("%s: top-level module may be 'python -m' script", module)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names: yield name
for const in code.co_consts:
if isinstance(const,basestring):
yield const
elif isinstance(const,CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
mode='w'
):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir)+1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'" % p)
if compress is None:
compress = (sys.version>="2.4") # avoid 2.3 zipimport bug when 64 bits
compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in os.walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in os.walk(base_dir):
visit(None, dirname, files)
return zip_filename
#
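# A minimal sketch of driving make_zipfile() directly; the input and output
# paths are illustrative only.
def _example_make_zipfile():
    # Pack the tree under 'build/egg-root' into 'dist/example.egg', storing
    # entries relative to the base directory and compressing with deflate.
    return make_zipfile('dist/example.egg', 'build/egg-root',
                        verbose=1, compress=True)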
| mit |
phenoxim/cinder | cinder/api/v2/snapshots.py | 3 | 6696 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes snapshots api."""
from oslo_log import log as logging
from oslo_utils import strutils
from six.moves import http_client
import webob
from cinder.api import common
from cinder.api.openstack import wsgi
from cinder.api.schemas import snapshots as snapshot
from cinder.api import validation
from cinder.api.views import snapshots as snapshot_views
from cinder import utils
from cinder import volume
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
class SnapshotsController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
_view_builder_class = snapshot_views.ViewBuilder
def __init__(self, ext_mgr=None):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(SnapshotsController, self).__init__()
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['cinder.context']
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
req.cache_db_snapshot(snapshot)
return self._view_builder.detail(req, snapshot)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['cinder.context']
LOG.info("Delete snapshot with id: %s", id)
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
self.volume_api.delete_snapshot(context, snapshot)
return webob.Response(status_int=http_client.ACCEPTED)
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, is_detail=False)
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, is_detail=True)
def _items(self, req, is_detail=True):
"""Returns a list of snapshots, transformed through view builder."""
context = req.environ['cinder.context']
# Pop out non search_opts and create local variables
search_opts = req.GET.copy()
sort_keys, sort_dirs = common.get_sort_params(search_opts)
marker, limit, offset = common.get_pagination_params(search_opts)
# Filter out invalid options
allowed_search_options = ('status', 'volume_id', 'name')
utils.remove_invalid_filter_options(context, search_opts,
allowed_search_options)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in search_opts:
search_opts['display_name'] = search_opts.pop('name')
snapshots = self.volume_api.get_all_snapshots(context,
search_opts=search_opts,
marker=marker,
limit=limit,
sort_keys=sort_keys,
sort_dirs=sort_dirs,
offset=offset)
req.cache_db_snapshots(snapshots.objects)
if is_detail:
snapshots = self._view_builder.detail_list(req, snapshots.objects)
else:
snapshots = self._view_builder.summary_list(req, snapshots.objects)
return snapshots
@wsgi.response(http_client.ACCEPTED)
@validation.schema(snapshot.create)
def create(self, req, body):
"""Creates a new snapshot."""
kwargs = {}
context = req.environ['cinder.context']
snapshot = body['snapshot']
kwargs['metadata'] = snapshot.get('metadata', None)
volume_id = snapshot['volume_id']
volume = self.volume_api.get(context, volume_id)
force = snapshot.get('force', False)
force = strutils.bool_from_string(force, strict=True)
LOG.info("Create snapshot from volume %s", volume_id)
self.validate_name_and_description(snapshot, check_length=False)
# NOTE(thingee): v2 API allows name instead of display_name
if 'name' in snapshot:
snapshot['display_name'] = snapshot.pop('name')
if force:
new_snapshot = self.volume_api.create_snapshot_force(
context,
volume,
snapshot.get('display_name'),
snapshot.get('description'),
**kwargs)
else:
new_snapshot = self.volume_api.create_snapshot(
context,
volume,
snapshot.get('display_name'),
snapshot.get('description'),
**kwargs)
req.cache_db_snapshot(new_snapshot)
return self._view_builder.detail(req, new_snapshot)
@validation.schema(snapshot.update)
def update(self, req, id, body):
"""Update a snapshot."""
context = req.environ['cinder.context']
snapshot_body = body['snapshot']
self.validate_name_and_description(snapshot_body, check_length=False)
if 'name' in snapshot_body:
snapshot_body['display_name'] = snapshot_body.pop('name')
if 'description' in snapshot_body:
snapshot_body['display_description'] = snapshot_body.pop(
'description')
# Not found exception will be handled at the wsgi level
snapshot = self.volume_api.get_snapshot(context, id)
volume_utils.notify_about_snapshot_usage(context, snapshot,
'update.start')
self.volume_api.update_snapshot(context, snapshot, snapshot_body)
snapshot.update(snapshot_body)
req.cache_db_snapshot(snapshot)
volume_utils.notify_about_snapshot_usage(context, snapshot,
'update.end')
return self._view_builder.detail(req, snapshot)
def create_resource(ext_mgr):
return wsgi.Resource(SnapshotsController(ext_mgr))
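# A minimal sketch of the request body the create() handler above expects:
# 'volume_id' is required, while 'name', 'description', 'force' and 'metadata'
# are optional. All values are illustrative only.
_EXAMPLE_CREATE_BODY = {
    'snapshot': {
        'volume_id': 'VOLUME-UUID',
        'name': 'snap-1',
        'description': 'example snapshot',
        'force': False,
        'metadata': {},
    },
}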
| apache-2.0 |
danwchan/trail_of_cthulhu | mythos_website_upgrade/birthcharacter/admin.py | 1 | 1941 | from django.contrib import admin
from .models import AbilityList, AbilityExamples, OccupationList, DriveList, DriveExamples, AssociatedOccuDrive, AssociatedOccuAbil, SpecialList
# Primary keys you care about
#primary_keys = [
# 'occupation',
# 'drive',
# 'ability'
# ]
# Inline classes to build the editing forms
class AbilityInLine(admin.StackedInline):
model = AssociatedOccuAbil
fk_name = 'associated_occupations'
extra = 0
class OccuInLine(admin.StackedInline):
model = AssociatedOccuDrive
fk_name = 'drive'
extra = 0
class AbilityExInLine(admin.StackedInline):
model = AbilityExamples
fk_name = 'ability'
extra = 0
class DriveExInLine(admin.StackedInline):
model = DriveExamples
fk_name = 'drive'
extra = 0
# ModelAdmin classes that bind it all together and define the editing forms
class AbilityAdmin(admin.ModelAdmin):
list_display = ['ability', 'purist', 'pulp', 'rating']
search_fields = ['abilitylist__ability']
inlines = [
AbilityExInLine
]
class OccupationAdmin(admin.ModelAdmin):
list_display = ['occupation', 'purist', 'pulp', 'rating']
search_fields = ['occupationlist__occupation']
inlines = [
AbilityInLine
]
def _abilitylist(self, obj):
        return obj.abilitylist.all().count()  # number of AbilityList records related to this occupation
class DriveAdmin(admin.ModelAdmin):
list_display = ['drive', 'purist', 'pulp', 'rating']
search_fields = ['abilitylist__ability']
inlines = [
DriveExInLine,
OccuInLine
]
# Register your models here.
admin.site.register(AbilityList, AbilityAdmin)
admin.site.register(OccupationList, OccupationAdmin)
admin.site.register(DriveList, DriveAdmin)
#TO BUILD
#overview page to see which records are old/poorly performing
#formatting to make it prettier
#expand drive examples to all entries and formalize the media source idea
| gpl-3.0 |
mrunge/horizon | openstack_dashboard/dashboards/admin/metering/urls.py | 2 | 1031 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.metering import views
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.metering.views',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^samples$', views.SamplesView.as_view(), name='samples'),
url(r'^report$', views.ReportView.as_view(), name='report'),
url(r'^report/csv$', views.CsvReportView.as_view(), name='csvreport'))
| apache-2.0 |
befair/sbcatalog | api/flaskapp.py | 2 | 2401 | # This file is part of sbcatalog
#
# sbcatalog is Copyright © 2015 beFair.it
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from eve import Eve
from endpoints import xml_collections_endpoint, geo_collections_endpoint
class XMLEve(Eve):
"""
    This class lets Eve import XML documents.
    It overloads the `collections_endpoint` view function: it interprets the
    text/xml Content-Type and calls the `post` function with the JSON payload
    built from the XML document.
"""
def __init__(self, *args, **kw):
"""
        Init Eve and overload the endpoints' view_functions.
"""
super(XMLEve, self).__init__(*args, **kw)
# TODO: iterate over all resources
resource = 'supplier'
endpoint = resource + "|resource"
geo_resource = 'geosupplier'
geo_endpoint = geo_resource + "|resource"
self.view_functions[endpoint] = xml_collections_endpoint
self.view_functions[geo_endpoint] = geo_collections_endpoint
settings = self.config['DOMAIN'][resource]
geo_settings = self.config['DOMAIN'][geo_resource]
self.add_url_rule(self.api_prefix + '/gdxp/supplier',
endpoint,
view_func=xml_collections_endpoint,
methods=settings['resource_methods'] + ['OPTIONS'])
self.add_url_rule(self.api_prefix + '/geo/supplier',
geo_endpoint,
view_func=geo_collections_endpoint,
methods=geo_settings['resource_methods'] + ['OPTIONS'])
# MIGHT BE USEFUL
# url = '%s/%s' % (self.api_prefix, settings['url'])
# self.add_url_rule(url, endpoint, view_func=gdxp_collections_endpoint,
# methods=settings['resource_methods'] + ['OPTIONS'])
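# A minimal sketch of running the subclass; it is instantiated exactly like
# Eve itself, and the settings path is hypothetical.
def _example_run_app():
    app = XMLEve(settings='settings.py')
    # A POST with Content-Type text/xml to <api_prefix>/gdxp/supplier is now
    # routed through xml_collections_endpoint registered in __init__ above.
    app.run(host='0.0.0.0', port=5000, debug=True)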
| agpl-3.0 |
ruibarreira/linuxtrail | usr/lib/python2.7/xml/etree/ElementInclude.py | 74 | 5076 | #
# ElementTree
# $Id: ElementInclude.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xinclude support for element trees
#
# history:
# 2003-08-15 fl created
# 2003-11-14 fl fixed default loader
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2008 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Limited XInclude support for the ElementTree package.
##
import copy
from . import ElementTree
XINCLUDE = "{http://www.w3.org/2001/XInclude}"
XINCLUDE_INCLUDE = XINCLUDE + "include"
XINCLUDE_FALLBACK = XINCLUDE + "fallback"
##
# Fatal include error.
class FatalIncludeError(SyntaxError):
pass
##
# Default loader. This loader reads an included resource from disk.
#
# @param href Resource reference.
# @param parse Parse mode. Either "xml" or "text".
# @param encoding Optional text encoding.
# @return The expanded resource. If the parse mode is "xml", this
# is an ElementTree instance. If the parse mode is "text", this
# is a Unicode string. If the loader fails, it can return None
# or raise an IOError exception.
# @throws IOError If the loader fails to load the resource.
def default_loader(href, parse, encoding=None):
with open(href) as file:
if parse == "xml":
data = ElementTree.parse(file).getroot()
else:
data = file.read()
if encoding:
data = data.decode(encoding)
return data
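##
# A minimal sketch of a custom loader: any callable with the same
# (href, parse, encoding=None) signature can be passed to include() in place
# of default_loader, e.g. to confine includes to one base directory.
# 'base_dir' is an illustrative parameter.
def make_restricted_loader(base_dir):
    import os
    root = os.path.normpath(base_dir)
    def loader(href, parse, encoding=None):
        path = os.path.normpath(os.path.join(root, href))
        if not path.startswith(root + os.sep):
            raise IOError("refusing to include %r from outside %r" % (href, root))
        return default_loader(path, parse, encoding)
    return loader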
##
# Expand XInclude directives.
#
# @param elem Root element.
# @param loader Optional resource loader. If omitted, it defaults
# to {@link default_loader}. If given, it should be a callable
# that implements the same interface as <b>default_loader</b>.
# @throws FatalIncludeError If the function fails to include a given
# resource, or if the tree contains malformed XInclude elements.
# @throws IOError If the function fails to load a given resource.
def include(elem, loader=None):
if loader is None:
loader = default_loader
# look for xinclude elements
i = 0
while i < len(elem):
e = elem[i]
if e.tag == XINCLUDE_INCLUDE:
# process xinclude directive
href = e.get("href")
parse = e.get("parse", "xml")
if parse == "xml":
node = loader(href, parse)
if node is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
node = copy.copy(node)
if e.tail:
node.tail = (node.tail or "") + e.tail
elem[i] = node
elif parse == "text":
text = loader(href, parse, e.get("encoding"))
if text is None:
raise FatalIncludeError(
"cannot load %r as %r" % (href, parse)
)
if i:
node = elem[i-1]
node.tail = (node.tail or "") + text + (e.tail or "")
else:
elem.text = (elem.text or "") + text + (e.tail or "")
del elem[i]
continue
else:
raise FatalIncludeError(
"unknown parse type in xi:include tag (%r)" % parse
)
elif e.tag == XINCLUDE_FALLBACK:
raise FatalIncludeError(
"xi:fallback tag must be child of xi:include (%r)" % e.tag
)
else:
include(e, loader)
i = i + 1
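##
# A minimal sketch of expanding xi:include directives in a tree loaded from a
# hypothetical 'document.xml'.
def _example_include():
    tree = ElementTree.parse("document.xml")
    include(tree.getroot())          # default loader, reads includes from disk
    return ElementTree.tostring(tree.getroot())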
| gpl-3.0 |
gisweb/plomino.printdocuments | bootstrap-buildout.py | 172 | 6501 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
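# For example (interpreter and file names are illustrative):
#   /usr/bin/python2.7 bootstrap-buildout.py -c buildout.cfg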
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
action="store_true", default=False,
help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
help="use a specific setuptools version")
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
if options.allow_site_packages:
import setuptools
import pkg_resources
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
# ez_setup imports site, which adds site packages
# this will remove them from the path to ensure that incompatible versions
# of setuptools are not in the path
import site
# inside a virtualenv, there is no 'getsitepackages'.
# We can't remove these reliably
if hasattr(site, 'getsitepackages'):
for sitepackage_path in site.getsitepackages():
sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
setup_args['version'] = options.setuptools_version
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
try:
return not parsed_version.is_prerelease
except AttributeError:
# Older setuptools
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| gpl-3.0 |
caot/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geometry/test_data.py | 364 | 2994 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import os
from django.contrib import gis
from django.utils import simplejson
# This global used to store reference geometry data.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
"Turn all nested sequences to tuples in given sequence."
if isinstance(seq, (list, tuple)):
return tuple([tuplize(i) for i in seq])
return seq
def strconvert(d):
"Converts all keys in dictionary to str type."
return dict([(str(k), v) for k, v in d.iteritems()])
def get_ds_file(name, ext):
return os.path.join(TEST_DATA,
name,
name + '.%s' % ext
)
class TestObj(object):
"""
Base testing object, turns keyword args into attributes.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class TestDS(TestObj):
"""
Object for testing GDAL data sources.
"""
def __init__(self, name, **kwargs):
# Shapefile is default extension, unless specified otherwise.
ext = kwargs.pop('ext', 'shp')
self.ds = get_ds_file(name, ext)
super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
"""
Testing object used for wrapping reference geometry data
in GEOS/GDAL tests.
"""
def __init__(self, **kwargs):
# Converting lists to tuples of certain keyword args
# so coordinate test cases will match (JSON has no
# concept of tuple).
coords = kwargs.pop('coords', None)
if coords:
self.coords = tuplize(coords)
centroid = kwargs.pop('centroid', None)
if centroid:
self.centroid = tuple(centroid)
ext_ring_cs = kwargs.pop('ext_ring_cs', None)
if ext_ring_cs:
ext_ring_cs = tuplize(ext_ring_cs)
self.ext_ring_cs = ext_ring_cs
super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
"""
Each attribute of this object is a list of `TestGeom` instances.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, [TestGeom(**strconvert(kw)) for kw in value])
class TestDataMixin(object):
"""
Mixin used for GEOS/GDAL test cases that defines a `geometries`
property, which returns and/or loads the reference geometry data.
"""
@property
def geometries(self):
global GEOMETRIES
if GEOMETRIES is None:
# Load up the test geometry data from fixture into global.
gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
geometries = simplejson.loads(gzf.read())
GEOMETRIES = TestGeomSet(**strconvert(geometries))
return GEOMETRIES
| apache-2.0 |