repo_name | path | copies | size | content | license
---|---|---|---|---|---|
drewdru/AOI | imageSegmentation/sphc.py | 1 | 9832 | # # Superpixel Hierarchical Clustering algorithm (SPHC) For Image Segmentation
# The idea for the algorithm came from the difficulty of finding online resources that describe image segmentation algorithms using superpixels as a starting point, combined with the observation that neighboring superpixels are often similar in color and that object boundaries are defined by color differences and similarities.
# The algorithm takes two main inputs: an RGB pixel grid representing the image and a grid of segments produced by the skimage SLIC superpixel algorithm.
# After segment assignment, the superpixel hierarchical clustering proceeds as follows:
# 1. Loop through these four steps while the smallest cluster euclidean distance is below a specified threshold:
# 1a. Find neighboring segment pairs for each of the 1 through K superpixels.
# 1b. Get average R, G, and B values for each segment
# 1c. For each pair of neighboring segments, calculate euclidean distance using average R, G, and B values.
# 1d. Merge the 2 segments with the shortest RGB euclidean distance.
# 2. Output image.
# ##References
# 1. Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aure-lien Lucchi, Pascal Fua, and Sabine Susstrunk, SLIC Superpixels, EPFL Technical Report 149300, June 2010.
# 2. Adrian Rosebrock, A SLIC Superpixel Tutorial using Python, http://www.pyimagesearch.com/2014/07/28/a-slic-superpixel-tutorial-using-python, July 28, 2014.
# ### Superpixel Hierarchical Clustering algorithm (SPHC)
# ## Method and Code for merging Superpixels created by Paul Thompson (Please credit if you use this code)
# # The algorithm takes image superpixels created by the skimage SLIC function and merges neighbors in order of
# # color similarity (using euclidean distance).
# #References:
# #1. http://www.kev-smith.com/papers/SLIC_Superpixels.pdf
# #2. http://www.pyimagesearch.com/2014/07/28/a-slic-superpixel-tutorial-using-python/
import matplotlib.pyplot as plt, argparse, numpy as np, math, sys, copy
from skimage.segmentation import slic, mark_boundaries
from skimage.util import img_as_float
from skimage import io
from collections import defaultdict
from PIL import Image, ImageFilter
from PyQt5.QtCore import QCoreApplication
def initiateSegmentAttributes(segm_grid, image):
'''
    Each segment formed by skimage's SLIC function is assigned a dictionary of attributes for efficiency.
:param segm_grid: Each pixel has been identified with a segment identifier by the skimage SLIC function
    :param image: Each pixel has R, G, and B values associated with it
:return: Dictionary of dictionaries of attributes for each segment
'''
def initialSegmAttr():
return {'neighbors': set(), 'R': [], 'G': [], 'B': [], 'coord': set(),
'R_avg': 0.0, 'G_avg': 0.0, 'B_avg': 0.0}
segm_dict = defaultdict(initialSegmAttr)
for i in range(len(segm_grid)):
for j in range(len(segm_grid[i])):
if j != len(segm_grid[i]) - 1 and segm_grid[i][j] != segm_grid[i][j+1]:
segm_dict[segm_grid[i][j]]['neighbors'].add(segm_grid[i][j+1])
segm_dict[segm_grid[i][j+1]]['neighbors'].add(segm_grid[i][j])
if i != len(segm_grid) - 1 and segm_grid[i][j] != segm_grid[i+1][j]:
segm_dict[segm_grid[i][j]]['neighbors'].add(segm_grid[i+1][j])
segm_dict[segm_grid[i+1][j]]['neighbors'].add(segm_grid[i][j])
segm_dict[segm_grid[i][j]]['R'].append(image[i][j][0])
            # image is RGB, so channel 1 is G and channel 2 is B
            segm_dict[segm_grid[i][j]]['G'].append(image[i][j][1])
            segm_dict[segm_grid[i][j]]['B'].append(image[i][j][2])
segm_dict[segm_grid[i][j]]['coord'].add((i,j))
return segm_dict
def getNearestNeighbors(segm_dict):
'''
Calculates the average R, B, and G values for each segment. Then finds the two neighboring segments with
the smallest euclidean distance (for the three dimensions of R, B, and G).
:param segm_dict: dictionary of dictionaries of segment attributes
:return: segment pair with smallest color euclidean distance; distance value
'''
for k, v in segm_dict.items():
v['R_avg'] = sum(v['R'])/len(v['R'])
v['B_avg'] = sum(v['B'])/len(v['B'])
        v['G_avg'] = sum(v['G'])/len(v['G'])
neighbor_pairs = set()
nearest_neighbors = []
shortest_dist = 100.0
for k, v in segm_dict.items():
for neighbor in v['neighbors']:
neighbor_pair = tuple(sorted([k, neighbor]))
if neighbor_pair not in neighbor_pairs and k != neighbor:
neighbor_pairs.add(neighbor_pair)
eucl_dist = float(math.sqrt((v['R_avg'] - segm_dict[neighbor]['R_avg']) ** 2 +
(v['B_avg'] - segm_dict[neighbor]['B_avg']) ** 2 +
(v['G_avg'] - segm_dict[neighbor]['G_avg']) ** 2))
if eucl_dist < shortest_dist:
shortest_dist = eucl_dist
nearest_neighbors = neighbor_pair
return nearest_neighbors, shortest_dist
def mergeSegments(segm_dict, nearest_neighbors):
'''
Merges the pair of neighboring segments with the shortest euclidean distance (greatest color similarity)
:param segm_dict: dictionary of dictionaries of segment attributes
:param nearest_neighbors: segment pair with smallest color euclidean distance
:return: segm_dict: updated dictionary of dictionaries of segment attributes
'''
mergeto_dict = segm_dict[nearest_neighbors[0]]
mergefrom_dict = copy.deepcopy(segm_dict[nearest_neighbors[1]])
mergeto_dict['neighbors'] = mergeto_dict['neighbors'] | mergefrom_dict['neighbors']
mergeto_dict['neighbors'].discard(nearest_neighbors[0])
mergeto_dict['R'] += mergefrom_dict['R']
mergeto_dict['B'] += mergefrom_dict['B']
mergeto_dict['G'] += mergefrom_dict['G']
mergeto_dict['coord'] = mergeto_dict['coord'] | mergefrom_dict['coord']
for neighbor in mergefrom_dict['neighbors']:
segm_dict[neighbor]['neighbors'].add(nearest_neighbors[0])
segm_dict[neighbor]['neighbors'].discard(nearest_neighbors[1])
del segm_dict[nearest_neighbors[1]]
return segm_dict
def getSPHCsegments(segm_grid, image, numToMerge = 10, max_dist = 1.0, segCoords=None):
'''
Main function for running SPHC clustering algorithm. Initiates segment attributes. Then
iteratively finds and merges neighboring segments with most similar color.
:param segm_grid: Each pixel has been identified with a segment identifier by the skimage SLIC function
    :param image: Each pixel has R, G, and B values associated with it
    :param numToMerge: User input - number of segments to merge. Must be less than the number of segments.
    :param max_dist: Maximum euclidean distance for a pair of segments to merge
    :param segCoords: Optional (row, col) pixel coordinate; if provided, the coordinate set of the merged segment containing it is returned
    :return: segCoords, newSegmGrid, segmentsCount, segm_dict - the (possibly expanded) segment coordinates, the SPHC-labeled segment grid, the number of remaining segments, and the segment attribute dictionary
'''
print ("Initiating Segment Attributes...")
segm_dict = initiateSegmentAttributes(segm_grid, image)
shortest_dist = 0.0
merge_count = 0
print ("Merging Segments...")
while shortest_dist <= max_dist and merge_count <= numToMerge:
QCoreApplication.processEvents()
nearest_neighbors, shortest_dist = getNearestNeighbors(segm_dict)
segm_dict = mergeSegments(segm_dict, nearest_neighbors)
merge_count += 1
if merge_count % 20 == 0:
print (merge_count, "segments merged")
print (merge_count, "segments merged - final")
newSegmGrid = copy.deepcopy(segm_grid)
segNum = -1
segments = []
segmentsCount = 0
for k, v in segm_dict.items():
if segCoords in v['coord']:
segCoords = v['coord']
for coord in v['coord']:
newSegmGrid[coord[0], coord[1]] = int(k)
if k not in segments:
segmentsCount += 1
segments.append(k)
return segCoords, newSegmGrid, segmentsCount, segm_dict
def doSPHC(imagePath, outPath, numSegments = 500, Sigma = 4, segmentsToMerge = 50,
distance_limit = .1, pixMouse=None, isTest=False):
    # SLIC parameters:
    #   numSegments: how many superpixels to start with - input to the SLIC function
    #   Sigma: controls superpixel shape; higher values make superpixels more square
    # SPHC parameters:
    #   segmentsToMerge: how many superpixels to merge based on color similarity
    #   distance_limit: limits which segments get merged based on their difference in average color
image = img_as_float(io.imread(imagePath))
SLICsegm_grid = slic(image, n_segments=numSegments, sigma=Sigma)
segCoords, SPHCsegm_grid, segmentsCount, segm_dict = getSPHCsegments(SLICsegm_grid, image, numToMerge=segmentsToMerge,
max_dist=distance_limit, segCoords=pixMouse)
if isTest:
image = img_as_float(io.imread(outPath))
data = mark_boundaries(image, SPHCsegm_grid)*256
img = Image.fromarray(np.asarray(np.clip(data, 0, 255), dtype="uint8"))
pixels = img.load()
size = img.size
tempImg = Image.new('RGB', size, color=0)
tempImgPixels = tempImg.load()
if segCoords is not None:
for pix in segCoords:
pixels[pix[1], pix[0]] = (255, 0, 0)
tempImgPixels[pix[1], pix[0]] = (255, 255, 255)
else:
tempImgData = mark_boundaries(tempImg, SPHCsegm_grid)*256
tempImg = Image.fromarray(np.asarray(np.clip(tempImgData, 0, 255), dtype="uint8"))
img.save(outPath)
# get image area, perimeter and Center of mass
data1 = np.asarray(tempImg, dtype="float")
imgTestFilters = tempImg.filter(ImageFilter.FIND_EDGES)
data2 = np.asarray(imgTestFilters, dtype="float")
return data1, data2, segmentsCount, segm_dict
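# A minimal usage sketch (not part of the original module). The image paths,
# parameter values and the pixMouse coordinate below are placeholder assumptions
# for illustration; doSPHC calls QCoreApplication.processEvents(), so a Qt core
# application is created first.
if __name__ == '__main__':
    app = QCoreApplication(sys.argv)
    mask_data, edge_data, segmentsCount, segm_dict = doSPHC(
        'input.png', 'sphc_out.png', numSegments=300, Sigma=4,
        segmentsToMerge=50, distance_limit=0.1, pixMouse=(10, 10), isTest=False)
    print(segmentsCount, "segments remain after merging")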
| gpl-3.0 |
thientu/scikit-learn | sklearn/metrics/pairwise.py | 49 | 44088 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal, or the equivalent
check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like, shape = [n_samples_1], optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
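# Illustrative sketch (not part of the scikit-learn API): a quick check that the
# dot-product expansion used above matches a naive pairwise loop. The helper
# name _demo_euclidean_expansion is an assumption added for this example.
def _demo_euclidean_expansion():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    fast = euclidean_distances(X, Y)
    # Naive double loop over rows for comparison.
    naive = np.array([[np.sqrt(np.sum((x - y) ** 2)) for y in Y] for x in X])
    # Both formulations should agree up to floating point rounding.
    return np.allclose(fast, naive)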
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
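# Illustrative sketch (not part of the scikit-learn API): the chunked computation
# above should agree with the memory-hungry equivalent built from the full
# distance matrix. The helper name _demo_argmin_min is an assumption.
def _demo_argmin_min():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 2)
    Y = rng.rand(3, 2)
    idx, dist = pairwise_distances_argmin_min(X, Y)
    D = pairwise_distances(X, Y)
    # Same nearest indices and same minimal distances, just computed in chunks.
    return np.array_equal(idx, D.argmin(axis=1)) and np.allclose(dist, D.min(axis=1))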
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances([[3]], [[2]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[2]], [[3]])#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
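# Illustrative sketch (not part of the scikit-learn API): verify the note above
# that the paired cosine distance equals half the squared euclidean distance
# between unit-normalized rows. The helper name is an assumption.
def _demo_paired_cosine_identity():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    Y = rng.rand(4, 3)
    lhs = paired_cosine_distances(X, Y)
    rhs = .5 * paired_euclidean_distances(normalize(X), normalize(Y)) ** 2
    return np.allclose(lhs, rhs)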
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
    degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
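# Illustrative sketch (not part of the scikit-learn API): the in-place kernel
# computation above should match a direct evaluation of
# (gamma * <x, y> + coef0) ** degree. The helper name is an assumption.
def _demo_polynomial_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    gamma, coef0, degree = 0.5, 1.0, 3
    K = polynomial_kernel(X, Y, degree=degree, gamma=gamma, coef0=coef0)
    direct = (gamma * X.dot(Y.T) + coef0) ** degree
    return np.allclose(K, direct)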
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
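# Illustrative sketch (not part of the scikit-learn API): the RBF kernel above
# should equal exp(-gamma * ||x - y||^2) evaluated entry by entry. The helper
# name is an assumption.
def _demo_rbf_kernel():
    rng = np.random.RandomState(0)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    gamma = 0.25
    K = rbf_kernel(X, Y, gamma=gamma)
    direct = np.array([[np.exp(-gamma * np.sum((x - y) ** 2)) for y in Y] for x in X])
    return np.allclose(K, direct)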
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
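# Illustrative sketch (not part of the scikit-learn API): on L2-normalized data
# the cosine similarity reduces to the linear kernel, as stated above. The
# helper name is an assumption.
def _demo_cosine_is_linear_on_normalized():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 3))
    Y = normalize(rng.rand(5, 3))
    return np.allclose(cosine_similarity(X, Y), linear_kernel(X, Y))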
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
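# Illustrative sketch (not part of the scikit-learn API): pairwise_distances
# accepts a user-supplied callable that receives two 1-D rows; the result should
# match the equivalent scipy metric name. The helper name is an assumption.
def _demo_pairwise_callable_metric():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)

    def chebyshev(a, b):
        return np.max(np.abs(a - b))

    return np.allclose(pairwise_distances(X, metric=chebyshev),
                       pairwise_distances(X, metric='chebyshev'))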
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
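# Illustrative sketch (not part of the scikit-learn API): with filter_params=True,
# pairwise_kernels silently drops keyword arguments the chosen kernel does not
# accept (here 'degree' is filtered out for the RBF kernel). The helper name is
# an assumption.
def _demo_pairwise_kernels_filter_params():
    rng = np.random.RandomState(0)
    X = rng.rand(4, 3)
    K = pairwise_kernels(X, metric='rbf', filter_params=True, gamma=0.5, degree=2)
    return np.allclose(K, rbf_kernel(X, gamma=0.5))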
| bsd-3-clause |
szagoruyko/pyinn | pyinn/dgmm.py | 1 | 2606 | import torch
def cublas_dgmm(A, x, out=None):
if out is not None:
assert out.is_contiguous() and out.size() == A.size()
else:
out = A.new(A.size())
assert x.dim() == 1
assert x.numel() == A.size(-1) or x.numel() == A.size(0)
assert A.type() == x.type() == out.type()
assert A.is_contiguous()
if not isinstance(A, (torch.cuda.FloatTensor, torch.cuda.DoubleTensor)):
if x.numel() == A.size(-1):
return A.mm(torch.diag(x), out=out.view_as(A))
else:
return torch.diag(x).mm(A, out=out.view_as(A))
else:
if x.numel() == A.size(-1):
m, n = A.size(-1), A.numel() // A.size(-1)
mode = 'l'
# A.mm(x.diag(), out=out)
# return out
elif x.numel() == A.size(0):
n, m = A.size(0), A.numel() // A.size(0)
mode = 'r'
# if A.stride(0) == 1:
# mode = 'l'
# n, m = m, n
# x.diag().mm(A, out=out)
# return out
lda, ldc = m, m
incx = 1
handle = torch.cuda.current_blas_handle()
stream = torch.cuda.current_stream()._as_parameter_
from skcuda import cublas
cublas.cublasSetStream(handle, stream)
args = [handle, mode, m, n, A.data_ptr(), lda, x.data_ptr(), incx, out.data_ptr(), ldc]
if isinstance(A, torch.cuda.FloatTensor):
cublas.cublasSdgmm(*args)
elif isinstance(A, torch.cuda.DoubleTensor):
cublas.cublasDdgmm(*args)
return out
class DGMM(torch.autograd.Function):
def forward(self, input, x):
self.save_for_backward(input, x)
return cublas_dgmm(input, x)
def backward(self, grad_output):
input, x = self.saved_tensors
grad_input = grad_x = None
if self.needs_input_grad[0]:
grad_input = cublas_dgmm(grad_output.contiguous(), x)
assert grad_input.size() == input.size()
if self.needs_input_grad[1]:
dim = 0 if x.numel() == input.size(-1) else 1
grad_x = (grad_output * input).sum(dim).squeeze(dim)
# grad_x = grad_output.t().mm(input).diag()
assert grad_x.size() == x.size()
return grad_input, grad_x
def dgmm(input, x):
"""Multiplication with a diagonal matrix.
    Uses the cuBLAS dgmm function, which is sometimes faster than expand.
    In plain torch this is `input.mm(x.diag())`. Both left and right
    multiplications are supported.
Args:
input: 2D tensor
x: 1D tensor
"""
return DGMM()(input, x)
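# Illustrative usage sketch (not part of the original module): on CPU tensors
# cublas_dgmm falls back to an explicit diagonal product, so the result should
# match input.mm(x.diag()). This assumes a PyTorch version that still supports
# the legacy autograd Function interface used above; tensor sizes are arbitrary.
if __name__ == '__main__':
    A = torch.randn(4, 3)
    x = torch.randn(3)
    out = dgmm(A, x)
    ref = A.mm(torch.diag(x))
    print('max abs difference vs. explicit diag product:', (out - ref).abs().max())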
| mit |
ningchi/scikit-learn | sklearn/tree/export.py | 6 | 15622 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360./n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
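# Illustrative sketch (not part of the scikit-learn API): _color_brew returns one
# [R, G, B] triple per requested color, each component in the 0-255 range. The
# helper name _demo_color_brew is an assumption.
def _demo_color_brew():
    palette = _color_brew(3)
    assert len(palette) == 3
    assert all(0 <= channel <= 255 for rgb in palette for channel in rgb)
    return palette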
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
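# Editor-added usage sketch (hedged): a minimal call to the export_graphviz
# function defined above, wrapped in a function so nothing runs at import
# time. Only public scikit-learn APIs of this era are used; rendering the
# resulting .dot file additionally assumes the Graphviz `dot` binary
# (e.g. `dot -Tpng iris.dot -o iris.png`).
def _export_graphviz_usage_sketch():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier, export_graphviz
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)
    export_graphviz(clf, out_file="iris.dot",
                    feature_names=iris.feature_names,
                    class_names=iris.target_names,
                    filled=True, rounded=True)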
| bsd-3-clause |
thientu/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 259 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
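# Editor-added sketch (hedged): the numerical identity the test above relies
# on, assuming log_normalize exponentiates log-space values and rescales them
# to sum to one. It uses only numpy, so it is independent of the internals.
def _log_normalize_sketch():
    import numpy as np
    v = np.array([0.1, 0.8, 0.01, 0.09])   # already sums to 1
    a = np.log(2 * v)                       # arbitrary log-space rescaling
    probs = np.exp(a - np.max(a))           # stabilise before exponentiating
    probs /= probs.sum()                    # renormalise to a distribution
    return np.allclose(v, probs)            # True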
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/metrics/cluster/supervised.py | 25 | 31477 | """Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# Arnaud Fouchet <[email protected]>
# Thierry Guillemot <[email protected]>
# Gregory Stupp <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
from __future__ import division
from math import log
import numpy as np
from scipy.misc import comb
from scipy import sparse as sp
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
from ...utils.validation import check_array
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays."""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, sparse=False):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps : None or float, optional.
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
sparse : boolean, optional.
If True, return a sparse CSR contingency matrix. If ``eps is not None``,
and ``sparse is True``, will throw ValueError.
.. versionadded:: 0.18
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
Will be a ``scipy.sparse.csr_matrix`` if ``sparse=True``.
"""
if eps is not None and sparse:
raise ValueError("Cannot set 'eps' when sparse=True")
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = sp.coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
if sparse:
contingency = contingency.tocsr()
contingency.sum_duplicates()
else:
contingency = contingency.toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
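# Editor-added worked example (hedged): for labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 1, 1, 1] the dense contingency matrix returned above is
#     [[1, 1],
#      [0, 2]]
# i.e. the two samples of true class 0 land in different predicted clusters,
# while both samples of true class 1 land in predicted cluster 1.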
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://link.springer.com/article/10.1007%2FBF01908075
.. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
n_classes = np.unique(labels_true).shape[0]
n_clusters = np.unique(labels_pred).shape[0]
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (n_classes == n_clusters == 1 or
n_classes == n_clusters == 0 or
n_classes == n_clusters == n_samples):
return 1.0
# Compute the ARI using the contingency data
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
sum_comb_c = sum(comb2(n_c) for n_c in np.ravel(contingency.sum(axis=1)))
sum_comb_k = sum(comb2(n_k) for n_k in np.ravel(contingency.sum(axis=0)))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.data)
prod_comb = (sum_comb_c * sum_comb_k) / comb(n_samples, 2)
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return (sum_comb - prod_comb) / (mean_comb - prod_comb)
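# Editor-added worked example (hedged): for labels_true = [0, 0, 1, 2] and
# labels_pred = [0, 0, 1, 1] the quantities above are sum_comb_c = 1,
# sum_comb_k = 2, sum_comb = 1 and comb(4, 2) = 6, so
#     ARI = (1 - 2/6) / (1.5 - 2/6) ~= 0.57
# which matches the doctest in the docstring.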
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once.
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of the Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure : float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
MI = mutual_info_score(None, None, contingency=contingency)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness /
(homogeneity + completeness))
return homogeneity, completeness, v_measure_score
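# Editor-added worked example (hedged): for labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 0, 1, 2], MI = H(C) = log(2) ~= 0.693 while
# H(K) ~= 1.040, giving homogeneity = 1.0, completeness ~= 0.667 and a
# V-measure (their harmonic mean) of ~0.8, matching the doctests below.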
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity : float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings.
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency : {None, array, sparse matrix},
shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi : float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
else:
contingency = check_array(contingency,
accept_sparse=['csr', 'csc', 'coo'],
dtype=[int, np.int32, np.int64])
if isinstance(contingency, np.ndarray):
# For an array
nzx, nzy = np.nonzero(contingency)
nz_val = contingency[nzx, nzy]
elif sp.issparse(contingency):
# For a sparse matrix
nzx, nzy, nz_val = sp.find(contingency)
else:
raise ValueError("Unsupported type for 'contingency': %s" %
type(contingency))
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = pi.take(nzx) * pj.take(nzy)
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
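# Editor-added worked example (hedged): two identical, balanced binary
# labelings share all of their information, so with the natural logarithm
# used above mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) == log(2) ~= 0.693.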
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami : float (upper bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings.
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi : float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1 or
classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
contingency = contingency.astype(np.float64)
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def fowlkes_mallows_score(labels_true, labels_pred, sparse=False):
"""Measure the similarity of two clusterings of a set of points.
The Fowlkes-Mallows index (FMI) is defined as the geometric mean of
the pairwise precision and recall::
FMI = TP / sqrt((TP + FP) * (TP + FN))
Where ``TP`` is the number of **True Positives** (i.e. the number of pairs
of points that belong to the same clusters in both ``labels_true`` and
``labels_pred``), ``FP`` is the number of **False Positives** (i.e. the
number of pairs of points that belong to the same clusters in
``labels_true`` but not in ``labels_pred``) and ``FN`` is the number of
**False Negatives** (i.e. the number of pairs of points that belong to the
same clusters in ``labels_pred`` but not in ``labels_true``).
The score ranges from 0 to 1. A high value indicates a good similarity
between the two clusterings.
Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.
Parameters
----------
labels_true : int array, shape = (``n_samples``,)
A clustering of the data into disjoint subsets.
labels_pred : array, shape = (``n_samples``, )
A clustering of the data into disjoint subsets.
Returns
-------
score : float
The resulting Fowlkes-Mallows score.
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import fowlkes_mallows_score
>>> fowlkes_mallows_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally random, hence the FMI is null::
>>> fowlkes_mallows_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `E. B. Fowlkes and C. L. Mallows, 1983. "A method for comparing two
hierarchical clusterings". Journal of the American Statistical
Association
<http://wildfire.stat.ucla.edu/pdflibrary/fowlkes.pdf>`_
.. [2] `Wikipedia entry for the Fowlkes-Mallows Index
<https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples, = labels_true.shape
c = contingency_matrix(labels_true, labels_pred, sparse=True)
tk = np.dot(c.data, c.data) - n_samples
pk = np.sum(np.asarray(c.sum(axis=0)).ravel() ** 2) - n_samples
qk = np.sum(np.asarray(c.sum(axis=1)).ravel() ** 2) - n_samples
return tk / np.sqrt(pk * qk) if tk != 0. else 0.
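# Editor-added worked example (hedged): for labels_true = [0, 0, 1, 1] and
# labels_pred = [0, 0, 1, 2] the contingency matrix is [[2, 0, 0], [0, 1, 1]],
# so tk = (4 + 1 + 1) - 4 = 2, pk = (4 + 1 + 1) - 4 = 2, qk = (4 + 4) - 4 = 4
# and FMI = 2 / sqrt(2 * 4) ~= 0.71.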
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float64)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
dchaplinsky/pep.org.ua | pepdb/tasks/management/commands/load_adhoc_datasets.py | 1 | 6291 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
import argparse
from copy import copy
from hashlib import sha1
from django.core.management.base import BaseCommand
from django.db.utils import IntegrityError
from django.utils import timezone
import tqdm
from dateutil.parser import parse as dt_parse
from unicodecsv import DictReader
from core.elastic_models import Person as ElasticPerson
from tasks.models import AdHocMatch
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("importer")
class Command(BaseCommand):
help = """Highly volatile and expiremental stuff: a script that reads
and matches arbitrary datasets with names with the list of persons in DB"""
def add_arguments(self, parser):
parser.add_argument(
'dataset_file', type=argparse.FileType('r'),
help='Any dataset in the following formats: json, jsonlines, csv',
)
parser.add_argument(
'dataset_identifier',
help='Dataset name (will be displayed in admin)',
)
parser.add_argument(
'--filetype',
choices=("json", "jsonlines", "csv"),
required=True,
help='Format of the dataset',
)
parser.add_argument(
'--name_field',
nargs="+",
help='fields from dataset to use for the search'
)
parser.add_argument(
'--render_field',
nargs="*",
help='fields from dataset to render in the match results'
)
parser.add_argument(
'--dedup_field',
nargs="*",
help='fields from dataset to use to avoid duplicates after repeated runs'
)
parser.add_argument(
'--last_updated_from_dataset',
help='The date of the export of the dataset'
)
def iter_dataset(self, fp, filetype):
if filetype == "json":
for l in json.load(fp):
yield l
elif filetype == "jsonlines":
for l in fp:
yield json.loads(l)
elif filetype == "csv":
r = DictReader(fp)
for l in r:
yield l
def get_name(self, doc, fields):
return " ".join(filter(None, (doc.get(x, None) for x in fields)))
def search_for_person(self, name):
base_q = {
"query": name,
"operator": "and",
"fuzziness": 0,
"fields": ["full_name", "names", "full_name_en", "also_known_as_uk", "also_known_as_en"]
}
fuzziness = 0
while fuzziness < 3:
base_q["fuzziness"] = fuzziness
s = ElasticPerson.search().query({
"multi_match": base_q
})
if s.count():
return s.execute(), fuzziness
fuzziness += 1
return [], 0
def get_default_render_fields(self, doc, name_fields):
return sorted(k for k in doc.keys() if k not in name_fields)
def represent_entry_from_dataset(self, doc, options):
render_fields = options.get("render_field")
if render_fields is None:
render_fields = self.get_default_render_fields(doc, options["name_field"])
return (
tuple((k, doc.get(k)) for k in options["name_field"]) +
tuple((k, doc.get(k)) for k in render_fields)
)
def get_doc_hash(self, doc, options):
dedup_fields = options.get("dedup_field")
if dedup_fields is None:
if options.get("render_field") is None:
dedup_fields = self.get_default_render_fields(doc, options["name_field"])
else:
dedup_fields = copy(options["render_field"])
dedup_fields += options["name_field"]
return sha1(json.dumps(
{k: doc.get(k) for k in sorted(dedup_fields)}
)).hexdigest()
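# Editor-added sketch (hedged, not used by the command): json.dumps on a
# plain dict does not guarantee key order under Python 2, so a variant that
# relies on sort_keys=True instead of pre-sorting the field list could look
# like this.
def _stable_doc_hash(doc, fields):
    # Hash only the selected fields, serialised with a deterministic key order.
    return sha1(json.dumps({k: doc.get(k) for k in fields},
                           sort_keys=True)).hexdigest()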
def handle(self, *args, **options):
if "last_updated_from_dataset" in options:
last_updated = timezone.make_aware(dt_parse(options["last_updated_from_dataset"], dayfirst=True))
else:
last_updated = timezone.now()
with tqdm.tqdm() as pbar:
for i, item in enumerate(self.iter_dataset(options["dataset_file"], options["filetype"])):
pbar.update(1)
doc_hash = self.get_doc_hash(item, options)
name = self.get_name(item, options["name_field"])
if name:
rpr = dict(self.represent_entry_from_dataset(item, options))
found_persons, fuzziness = self.search_for_person(name)
for res in found_persons:
try:
obj, created = AdHocMatch.objects.get_or_create(
matched_json_hash=doc_hash,
dataset_id=options["dataset_identifier"],
person_id=res.id,
defaults={
"pep_name": res.full_name,
"pep_position": "{} @ {}".format(
getattr(res, "last_job_title", ""),
getattr(res, "last_workplace", "")
),
"matched_json": rpr,
"name_match_score": fuzziness,
"last_updated_from_dataset": last_updated,
"first_updated_from_dataset": last_updated,
"name_in_dataset": name
}
)
if not created and last_updated > obj.last_updated_from_dataset:
obj.last_updated_from_dataset = last_updated
obj.name_in_dataset = name
obj.matched_json = rpr
obj.save()
except IntegrityError:
logger.warning("Cannot find person {} with key {} in db".format(res.full_name, res.id))
| mit |
Akshay0724/scikit-learn | sklearn/linear_model/__init__.py | 82 | 3139 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .huber import HuberRegressor
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'HuberRegressor',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
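# Editor-added usage sketch (hedged): every estimator re-exported above
# follows the same fit/predict pattern; wrapped in a function so nothing runs
# at import time. The toy data values are illustrative only.
def _ridge_usage_sketch():
    import numpy as np
    from sklearn.linear_model import Ridge
    X = np.array([[0.0], [1.0], [2.0]])
    y = np.array([0.0, 1.1, 1.9])
    model = Ridge(alpha=1.0).fit(X, y)
    return model.predict(np.array([[3.0]]))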
| bsd-3-clause |
nils-werner/SimpleCV | SimpleCV/ImageClass.py | 8 | 519091 | # Load required libraries
from SimpleCV.base import *
from SimpleCV.Color import *
from SimpleCV.LineScan import *
from numpy import int32
from numpy import uint8
import cv2
from EXIF import *
if not init_options_handler.headless:
import pygame as pg
import scipy.ndimage as ndimage
import scipy.stats.stats as sss #for auto white balance
import scipy.cluster.vq as scv
import scipy.linalg as nla # for linear algebra / least squares
import math # math... who does that
import copy # for deep copy
#import scipy.stats.mode as spsmode
class ColorSpace:
"""
**SUMMARY**
The colorspace class is used to encapsulate the color space of a given image.
This class acts like a C/C++ style enumerated type.
See: http://stackoverflow.com/questions/2122706/detect-color-space-with-opencv
"""
UNKNOWN = 0
BGR = 1
GRAY = 2
RGB = 3
HLS = 4
HSV = 5
XYZ = 6
YCrCb = 7
class ImageSet(list):
"""
**SUMMARY**
This is an abstract class for keeping a list of images. It has a few
advantages in that you can use it to auto load data sets from a directory
or the net.
Keep in mind it inherits from a list too, so it has all the
functionality of a normal python list as well.
**EXAMPLES**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show()
or you can load a directory path:
>>> imgs = ImageSet('/path/to/imgs/')
>>> imgs.show()
This will download and show a bunch of random ninjas. If you want to
save all those images locally then just use:
>>> imgs.save()
You can also load up the sample images that come with simplecv as:
>>> imgs = ImageSet('samples')
>>> imgs.filelist
>>> logo = imgs.find('simplecv.png')
**TO DO**
Eventually this should allow us to pull image urls / paths from csv files.
The method should also allow us to associate an arbitrary bunch of data with each
image, and on load/save pickle that data or write it to a CSV file.
"""
filelist = None
def __init__(self, directory = None):
if not directory:
return
if isinstance(directory,list):
if isinstance(directory[0], Image):
super(ImageSet,self).__init__(directory)
elif isinstance(directory[0], str) or isinstance(directory[0], unicode):
super(ImageSet,self).__init__(map(Image, directory))
elif directory.lower() == 'samples' or directory.lower() == 'sample':
pth = LAUNCH_PATH
pth = os.path.realpath(pth)
directory = os.path.join(pth, 'sampleimages')
self.load(directory)
else:
self.load(directory)
def download(self, tag=None, number=10, size='thumb'):
"""
**SUMMARY**
This function downloads images from Google Image search based
on the tag you provide. The number is the number of images you
want to have in the list. Valid values for size are 'thumb', 'small',
'medium', 'large' or a tuple of exact dimensions i.e. (640,480).
Note that 'thumb' is exceptionally faster than others.
.. Warning::
This requires the python library Beautiful Soup to be installed
http://www.crummy.com/software/BeautifulSoup/
**PARAMETERS**
* *tag* - A string of tag values you would like to download.
* *number* - An integer of the number of images to try and download.
* *size* - the size of the images to download. Valid options are a tuple
of the exact size or a string of the following approximate sizes:
* thumb ~ less than 128x128
* small ~ approximately less than 640x480 but larger than 128x128
* medium ~ approximately less than 1024x768 but larger than 640x480.
* large ~ > 1024x768
**RETURNS**
Nothing - but caches local copy of images.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show()
"""
try:
from BeautifulSoup import BeautifulSoup
except:
print "You need to install Beatutiul Soup to use this function"
print "to install you can use:"
print "easy_install beautifulsoup"
return
INVALID_SIZE_MSG = """I don't understand what size images you want.
Valid options: 'thumb', 'small', 'medium', 'large'
or a tuple of exact dimensions i.e. (640,480)."""
if isinstance(size, basestring):
size = size.lower()
if size == 'thumb':
size_param = ''
elif size == 'small':
size_param = '&tbs=isz:s'
elif size == 'medium':
size_param = '&tbs=isz:m'
elif size == 'large':
size_param = '&tbs=isz:l'
else:
print INVALID_SIZE_MSG
return None
elif type(size) == tuple:
width, height = size
size_param = '&tbs=isz:ex,iszw:' + str(width) + ',iszh:' + str(height)
else:
print INVALID_SIZE_MSG
return None
# Used to extract imgurl parameter value from a URL
imgurl_re = re.compile('(?<=(&|\?)imgurl=)[^&]*((?=&)|$)')
add_set = ImageSet()
candidate_count = 0
while len(add_set) < number:
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
url = ("http://www.google.com/search?tbm=isch&q=" + urllib2.quote(tag) +
size_param + "&start=" + str(candidate_count))
page = opener.open(url)
soup = BeautifulSoup(page)
img_urls = []
# Gets URLs of the thumbnail images
if size == 'thumb':
imgs = soup.findAll('img')
for img in imgs:
dl_url = str(dict(img.attrs)['src'])
img_urls.append(dl_url)
# Gets the direct image URLs
else:
for link_tag in soup.findAll('a', {'href': re.compile('imgurl=')}):
dirty_url = link_tag.get('href') # URL to an image as given by Google Images
dl_url = str(re.search(imgurl_re, dirty_url).group()) # The direct URL to the image
img_urls.append(dl_url)
for dl_url in img_urls:
try:
add_img = Image(dl_url, verbose=False)
# Don't know a better way to check if the image was actually returned
if add_img.height <> 0 and add_img.width <> 0:
add_set.append(add_img)
except:
#do nothing
None
if len(add_set) >= number:
break
self.extend(add_set)
def upload(self,dest,api_key=None,api_secret=None, verbose = True):
"""
**SUMMARY**
Uploads all the images to imgur or flickr or dropbox. In verbose mode URL values are printed.
**PARAMETERS**
* *api_key* - a string of the API key.
* *api_secret* - (required only for flickr and dropbox ) a string of the API secret.
* *verbose* - If verbose is true all values are printed to the screen
**RETURNS**
if uploading is successful
- Imgur returns the original image URL on success and None if it fails.
- Flickr returns True on success, else returns False.
- dropbox returns True on success.
**EXAMPLE**
TO upload image to imgur::
>>> imgset = ImageSet("/home/user/Desktop")
>>> result = imgset.upload( 'imgur',"MY_API_KEY1234567890" )
>>> print "Uploaded To: " + result[0]
To upload image to flickr::
>>> imgset.upload('flickr','api_key','api_secret')
>>> imgset.upload('flickr') #Once the api keys and secret keys are cached.
To upload image to dropbox::
>>> imgset.upload('dropbox','api_key','api_secret')
>>> imgset.upload('dropbox') #Once the api keys and secret keys are cached.
**NOTES**
.. Warning::
This method requires the following packages to be installed:
- PyCurl
- flickr api
- dropbox
.. Warning::
You must supply your own API key.
Find more about API keys:
- http://imgur.com/register/api_anon
- http://www.flickr.com/services/api/misc.api_keys.html
- https://www.dropbox.com/developers/start/setup#python
"""
try :
for i in self:
i.upload(dest,api_key,api_secret, verbose)
return True
except :
return False
def show(self, showtime = 0.25):
"""
**SUMMARY**
This is a quick way to show all the items in a ImageSet.
The time is in seconds. You can also provide a decimal value, so
showtime can be 1.5, 0.02, etc.
to show each image.
**PARAMETERS**
* *showtime* - the time, in seconds, to show each image in the set.
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.show()
"""
for i in self:
i.show()
time.sleep(showtime)
def _get_app_ext(self, loops=0):
""" Application extention. Part that secifies amount of loops.
if loops is 0, if goes on infinitely.
"""
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
if loops == 0:
loops = 2**16-1
bb += int_to_bin(loops)
bb += '\x00' # end
return bb
def _get_graphics_control_ext(self, duration=0.1):
""" Graphics Control Extension. A sort of header at the start of
each image. Specifies transparency and duration. """
bb = '\x21\xF9\x04'
bb += '\x08' # no transparency
bb += int_to_bin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def _write_gif(self, filename, duration=0.1, loops=0, dither=1):
""" Given a set of images writes the bytes to the specified stream.
"""
frames = 0
previous = None
fp = open(filename, 'wb')
if not PIL_ENABLED:
logger.warning("Need PIL to write animated gif files.")
return
converted = []
for img in self:
if not isinstance(img,pil.Image):
pil_img = img.getPIL()
else:
pil_img = img
converted.append((pil_img.convert('P',dither=dither), img._get_header_anim()))
try:
for img, header_anim in converted:
if not previous:
# gather data
palette = getheader(img)[1]
data = getdata(img)
imdes, data = data[0], data[1:]
header = header_anim
appext = self._get_app_ext(loops)
graphext = self._get_graphics_control_ext(duration)
# write global header
fp.write(header)
fp.write(palette)
fp.write(appext)
# write image
fp.write(graphext)
fp.write(imdes)
for d in data:
fp.write(d)
else:
# gather info (compress difference)
data = getdata(img)
imdes, data = data[0], data[1:]
graphext = self._get_graphics_control_ext(duration)
# write image
fp.write(graphext)
fp.write(imdes)
for d in data:
fp.write(d)
previous = img.copy()
frames = frames + 1
fp.write(";") # end gif
finally:
fp.close()
return frames
def save(self, destination=None, dt=0.2, verbose = False, displaytype=None):
"""
**SUMMARY**
This is a quick way to save all the images in a data set.
It can also be used to display the images in an IPython notebook.
If you didn't specify a path one will randomly be generated.
To see the location the files are being saved to then pass
verbose = True.
**PARAMETERS**
* *destination* - path to which images should be saved, or name of gif
  file. If this ends in .gif, the pictures will be saved accordingly.
* *dt* - time between frames, for creating gif files.
* *verbose* - print the path of the saved files to the console.
* *displaytype* - the method use for saving or displaying images.
valid values are:
* 'notebook' - display to the ipython notebook.
* None - save to a temporary file.
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.save(destination="ninjas_folder", verbose=True)
>>> imgs.save(destination="ninjas.gif", verbose=True)
"""
if displaytype=='notebook':
try:
from IPython.core.display import Image as IPImage
except ImportError:
print "You need IPython Notebooks to use this display mode"
return
from IPython.core import display as Idisplay
for i in self:
tf = tempfile.NamedTemporaryFile(suffix=".png")
loc = tf.name
tf.close()
i.save(loc)
Idisplay.display(IPImage(filename=loc))
return
else:
if destination:
if destination.endswith(".gif"):
return self._write_gif(destination, dt)
else:
for i in self:
i.save(path=destination, temp=True, verbose=verbose)
else:
for i in self:
i.save(verbose=verbose)
def showPaths(self):
"""
**SUMMARY**
This shows the file paths of all the images in the set.
If they haven't been saved to disk then they will not have a filepath
**RETURNS**
Nothing.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.download("ninjas")
>>> imgs.save(verbose=True)
>>> imgs.showPaths()
**TO DO**
This should return paths as a list too.
"""
for i in self:
print i.filename
def _read_gif(self, filename):
""" read_gif(filename)
Reads images from an animated GIF file. Returns the number of images loaded.
"""
if not PIL_ENABLED:
return
elif not os.path.isfile(filename):
return
pil_img = pil.open(filename)
pil_img.seek(0)
pil_images = []
try:
while True:
pil_images.append(pil_img.copy())
pil_img.seek(pil_img.tell()+1)
except EOFError:
pass
loaded = 0
for img in pil_images:
self.append(Image(img))
loaded += 1
return loaded
def load(self, directory = None, extension = None, sort_by=None):
"""
**SUMMARY**
This function loads up files automatically from the directory you pass
it. If you give it an extension it will only load that extension
otherwise it will try to load all known file types in that directory.
extension should be in the format:
extension = 'png'
**PARAMETERS**
* *directory* - The path or directory from which to load images.
* *extension* - The extension to use. If none is given, all known image formats are loaded.
* *sort_by* - Sort the directory based on one of the following parameters passed as strings.
* *time* - the modification time of the file.
* *name* - the name of the file.
* *size* - the size of the file.
The default behavior is to leave the directory unsorted.
**RETURNS**
The number of images in the image set.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.load("images/faces")
>>> imgs.load("images/eyes", "png")
"""
if not directory:
logger.warning("You need to give a directory to load files from.")
return
if not os.path.exists(directory):
logger.warning( "Invalid image path given.")
return
if extension:
#regexes to ignore case
regexList = [ '[' + letter + letter.upper() + ']' for letter in extension]
regex = ''.join(regexList)
regex = "*." + regex
formats = [os.path.join(directory, regex)]
else:
formats = [os.path.join(directory, x) for x in IMAGE_FORMATS]
file_set = [glob.glob(p) for p in formats]
full_set = []
for f in file_set:
for i in f:
full_set.append(i)
file_set = full_set
if(sort_by is not None):
if( sort_by.lower() == "time"):
file_set = sorted(file_set,key=os.path.getmtime)
if( sort_by.lower() == "name"):
file_set = sorted(file_set)
if( sort_by.lower() == "size"):
file_set = sorted(file_set,key=os.path.getsize)
self.filelist = dict()
for i in file_set:
tmp = None
try:
tmp = Image(i)
if( tmp is not None and tmp.width > 0 and tmp.height > 0):
if sys.platform.lower() == 'win32' or sys.platform.lower() == 'win64':
self.filelist[tmp.filename.split('\\')[-1]] = tmp
else:
self.filelist[tmp.filename.split('/')[-1]] = tmp
self.append(tmp)
except:
continue
return len(self)
def standardize(self,width,height):
"""
**SUMMARY**
Resize every image in the set to a standard size.
**PARAMETERS**
* *width* - the width that we want for every image in the set.
* *height* - the height that we want for every image in the set.
**RETURNS**
A new image set where every image in the set is scaled to the desired size.
**EXAMPLE**
>>> iset = ImageSet("./b/")
>>> thumbnails = iset.standardize(64,64)
>>> for t in thumbnails:
...     t.show()
"""
retVal = ImageSet()
for i in self:
retVal.append(i.resize(width,height))
return retVal
def dimensions(self):
"""
**SUMMARY**
Return an np.array that are the width and height of every image in the image set.
**PARAMETERS**
--NONE--
**RETURNS**
An Nx2 numpy array where N is the number of images in the set. The first
column is the width, and the second column is the height.
**EXAMPLE**
>>> iset = ImageSet("./b/")
>>> sz = iset.dimensions()
>>> np.max(sz[:,0]) # returns the largest width in the set.
"""
retVal = []
for i in self:
retVal.append((i.width,i.height))
return np.array(retVal)
def average(self, mode="first", size=(None,None)):
"""
**SUMMARY**
Casts each in the image set into a 32F image, averages them together and returns the results.
If the images are different sizes the method attempts to standardize them.
**PARAMETERS**
* *mode* -
* "first" - resize everything to the size of the first image.
* "max" - resize everything to be the max width and max height of the set.
* "min" - resize everything to be the min width and min height of the set.
* "average" - resize everything to be the average width and height of the set
* "fixed" - fixed, use the size tuple provided.
* *size* - if the mode is set to fixed use this tuple as the size of the resulting image.
**RETURNS**
Returns a single image that is the average of all the values.
**EXAMPLE**
>>> imgs = ImageSet()
>>> imgs.load("images/faces")
>>> result = imgs.average(mode="first")
>>> result.show()
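A hedged sketch of the fixed-size mode (the size tuple is only used when mode="fixed"):
>>> small_avg = imgs.average(mode="fixed", size=(320,240))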
**TODO**
* Allow the user to pass in an offset parameter that blits the images into the result.
"""
fw = 0
fh = 0
# figure out how we will handle everything
if( len(self) <= 0 ):
return ImageSet()
vals = self.dimensions()
if( mode.lower() == "first" ):
fw = self[0].width
fh = self[0].height
elif( mode.lower() == "fixed" ):
fw = size[0]
fh = size[1]
elif( mode.lower() == "max" ):
fw = np.max(vals[:,0])
fh = np.max(vals[:,1])
elif( mode.lower() == "min" ):
fw = np.min(vals[:,0])
fh = np.min(vals[:,1])
elif( mode.lower() == "average" ):
fw = int(np.average(vals[:,0]))
fh = int(np.average(vals[:,1]))
#determine if we really need to resize the images
t1 = np.sum(vals[:,0]-fw)
t2 = np.sum(vals[:,1]-fh)
if( t1 != 0 or t2 != 0 ):
resized = self.standardize(fw,fh)
else:
resized = self
# Now do the average calculation
accumulator = cv.CreateImage((fw,fh), cv.IPL_DEPTH_8U,3)
cv.Zero(accumulator)
alpha = float(1.0/len(resized))
beta = float((len(resized)-1.0)/len(resized))
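# Running blend: each pass computes accumulator = alpha*img + beta*accumulator, so the
# last image contributes 1/N while earlier images are geometrically down-weighted; this
# approximates, but does not exactly equal, the arithmetic mean of the set.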
for i in resized:
cv.AddWeighted(i.getBitmap(),alpha,accumulator,beta,0,accumulator)
retVal = Image(accumulator)
return retVal
def __getitem__(self,key):
"""
**SUMMARY**
Returns an ImageSet when sliced. Previously slicing returned a plain
list. Now it is possible to call ImageSet member
functions on sub-lists
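**EXAMPLE**
A hedged sketch (assuming `iset` is an ImageSet with at least five images):
>>> subset = iset[2:5]          # an ImageSet, so member functions still work
>>> sizes = subset.dimensions()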
"""
if type(key) is types.SliceType: #Or can use 'try:' for speed
return ImageSet(list.__getitem__(self, key))
else:
return list.__getitem__(self,key)
def __getslice__(self, i, j):
"""
Deprecated since python 2.0, now using __getitem__
"""
return self.__getitem__(slice(i,j))
class Image:
"""
**SUMMARY**
The Image class is the heart of SimpleCV and allows you to convert to and
from a number of source types with ease. It also has intelligent buffer
management, so that modified copies of the Image required for algorithms
such as edge detection, etc can be cached and reused when appropriate.
Images are converted into 8-bit, 3-channel images in RGB colorspace. It will
automatically handle conversion from other representations into this
standard format. If dimensions are passed, an empty image is created.
**EXAMPLE**
>>> i = Image("/path/to/image.png")
>>> i = Camera().getImage()
You can also just load the SimpleCV logo using:
>>> img = Image("simplecv")
>>> img = Image("logo")
>>> img = Image("logo_inverted")
>>> img = Image("logo_transparent")
Or you can load an image from a URL:
>>> img = Image("http://www.simplecv.org/image.png")
"""
width = 0 #width and height in px
height = 0
depth = 0
filename = "" #source filename
filehandle = "" #filehandle if used
camera = ""
_mLayers = []
_mDoHuePalette = False
_mPaletteBins = None
_mPalette = None
_mPaletteMembers = None
_mPalettePercentages = None
_barcodeReader = "" #property for the ZXing barcode reader
#these are buffer frames for various operations on the image
_bitmap = "" #the bitmap (iplimage) representation of the image
_matrix = "" #the matrix (cvmat) representation
_grayMatrix = "" #the gray scale (cvmat) representation -KAS
_graybitmap = "" #a reusable 8-bit grayscale bitmap
_equalizedgraybitmap = "" #the above bitmap, normalized
_blobLabel = "" #the label image for blobbing
_edgeMap = "" #holding reference for edge map
_cannyparam = (0, 0) #parameters that created _edgeMap
_pil = "" #holds a PIL object in buffer
_numpy = "" #numpy form buffer
_grayNumpy = "" # grayscale numpy for keypoint stuff
_colorSpace = ColorSpace.UNKNOWN #Colorspace Object
_pgsurface = ""
_cv2Numpy = None #numpy array for OpenCV >= 2.3
_cv2GrayNumpy = None #grayscale numpy array for OpenCV >= 2.3
_gridLayer = [None,[0,0]]#to store grid details | Format -> [gridIndex , gridDimensions]
#For DFT Caching
_DFT = [] #an array of 2 channel (real,imaginary) 64F images
#Keypoint caching values
_mKeyPoints = None
_mKPDescriptors = None
_mKPFlavor = "NONE"
#temp files
_tempFiles = []
#when we empty the buffers, populate with this:
_initialized_buffers = {
"_bitmap": "",
"_matrix": "",
"_grayMatrix": "",
"_graybitmap": "",
"_equalizedgraybitmap": "",
"_blobLabel": "",
"_edgeMap": "",
"_cannyparam": (0, 0),
"_pil": "",
"_numpy": "",
"_grayNumpy":"",
"_pgsurface": "",
"_cv2GrayNumpy": "",
"_cv2Numpy":""}
#The variables _uncroppedX and _uncroppedY are used to buffer the points when we crop the image.
_uncroppedX = 0
_uncroppedY = 0
def __repr__(self):
if len(self.filename) == 0:
fn = "None"
else:
fn = self.filename
return "<SimpleCV.Image Object size:(%d, %d), filename: (%s), at memory location: (%s)>" % (self.width, self.height, fn, hex(id(self)))
#initialize the frame
#parameters: source designation (filename)
#todo: handle camera/capture from file cases (detect on file extension)
def __init__(self, source = None, camera = None, colorSpace = ColorSpace.UNKNOWN,verbose=True, sample=False, cv2image=False, webp=False):
"""
**SUMMARY**
The constructor takes a single polymorphic parameter, which it tests
to see how it should convert into an RGB image. Supported types include:
**PARAMETERS**
* *source* - The source of the image. This can be just about anything: a numpy array, a file name, a width and height
tuple, a url. Certain strings such as "lenna" or "logo" are loaded automatically for quick testing.
* *camera* - A camera to pull a live image.
* *colorspace* - A default camera color space. If none is specified this will usually default to the BGR colorspace.
* *sample* - This is set to true if you want to load some of the included sample images without having to specify the complete path
**EXAMPLES**
>>> img = Image('simplecv')
>>> img = Image('test.png')
>>> img = Image('http://www.website.com/my_image.jpg')
>>> img.show()
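A hedged sketch of two other accepted sources (a width/height tuple and a numpy array):
>>> blank = Image((640, 480))                              # empty 640x480 image
>>> img = Image(np.zeros((480, 640, 3), dtype=np.uint8))   # from a 3-channel array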
**NOTES**
OpenCV: iplImage and cvMat types
Python Image Library: Image type
Filename: All opencv supported types (jpg, png, bmp, gif, etc)
URL: The source can be a url, but must include the http://
"""
self._mLayers = []
self.camera = camera
self._colorSpace = colorSpace
#Keypoint Descriptors
self._mKeyPoints = []
self._mKPDescriptors = []
self._mKPFlavor = "NONE"
#Pallete Stuff
self._mDoHuePalette = False
self._mPaletteBins = None
self._mPalette = None
self._mPaletteMembers = None
self._mPalettePercentages = None
#Temp files
self._tempFiles = []
#Check if need to load from URL
#(this can be made shorter)if type(source) == str and (source[:7].lower() == "http://" or source[:8].lower() == "https://"):
if isinstance(source, basestring) and (source.lower().startswith("http://") or source.lower().startswith("https://")):
#try:
# added spoofed user agent for images that are blocking bots (like wikipedia)
req = urllib2.Request(source, headers={'User-Agent' : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.54 Safari/536.5"})
img_file = urllib2.urlopen(req)
#except:
#if verbose:
#print "Couldn't open Image from URL:" + source
#return None
im = StringIO(img_file.read())
source = pil.open(im).convert("RGB")
#Check if loaded from base64 URI
if isinstance(source, basestring) and (source.lower().startswith("data:image/png;base64,")):
img = source[22:].decode("base64")
im = StringIO(img)
source = pil.open(im).convert("RGB")
#This section loads custom built-in images
if isinstance(source, basestring):
tmpname = source.lower()
if tmpname == "simplecv" or tmpname == "logo":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','simplecv.png')
source = imgpth
elif tmpname == "simplecv_inverted" or tmpname == "inverted" or tmpname == "logo_inverted":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','simplecv_inverted.png')
source = imgpth
elif tmpname == "lenna":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','lenna.png')
source = imgpth
elif tmpname == "lyle":
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages','LyleJune1973.png')
source = imgpth
elif tmpname == "parity":
choice = random.choice(['LyleJune1973.png','lenna.png'])
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages',choice)
source = imgpth
elif sample:
imgpth = os.path.join(LAUNCH_PATH, 'sampleimages', source)
source = imgpth
if (type(source) == tuple):
w = int(source[0])
h = int(source[1])
source = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3)
cv.Zero(source)
if (type(source) == cv.cvmat):
self._matrix = cv.CreateMat(source.rows, source.cols, cv.CV_8UC3)
if((source.step/source.cols)==3): #this is just a guess
cv.Copy(source, self._matrix, None)
self._colorSpace = ColorSpace.BGR
elif((source.step/source.cols)==1):
cv.Merge(source, source, source, None, self._matrix)
self._colorSpace = ColorSpace.GRAY
else:
self._colorSpace = ColorSpace.UNKNOWN
warnings.warn("Unable to process the provided cvmat")
elif (type(source) == np.ndarray): #handle a numpy array conversion
if (type(source[0, 0]) == np.ndarray): #we have a 3 channel array
#convert to an iplimage bitmap
source = source.astype(np.uint8)
self._numpy = source
if not cv2image:
invertedsource = source[:, :, ::-1].transpose([1, 0, 2])
else:
# If the numpy array is from cv2, then it must not be transposed.
invertedsource = source
#invertedsource = source[:, :, ::-1].transpose([1, 0, 2]) # do not un-comment. breaks cv2 image support
self._bitmap = cv.CreateImageHeader((invertedsource.shape[1], invertedsource.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, invertedsource.tostring(),
invertedsource.dtype.itemsize * 3 * invertedsource.shape[1])
self._colorSpace = ColorSpace.BGR #this is an educated guess
else:
#we have a single channel array, convert to an RGB iplimage
source = source.astype(np.uint8)
if not cv2image:
source = source.transpose([1,0]) #we expect width/height but use col/row
self._bitmap = cv.CreateImage((source.shape[1], source.shape[0]), cv.IPL_DEPTH_8U, 3)
channel = cv.CreateImageHeader((source.shape[1], source.shape[0]), cv.IPL_DEPTH_8U, 1)
#initialize an empty channel bitmap
cv.SetData(channel, source.tostring(),
source.dtype.itemsize * source.shape[1])
cv.Merge(channel, channel, channel, None, self._bitmap)
self._colorSpace = ColorSpace.BGR
elif (type(source) == cv.iplimage):
if (source.nChannels == 1):
self._bitmap = cv.CreateImage(cv.GetSize(source), source.depth, 3)
cv.Merge(source, source, source, None, self._bitmap)
self._colorSpace = ColorSpace.GRAY
else:
self._bitmap = cv.CreateImage(cv.GetSize(source), source.depth, 3)
cv.Copy(source, self._bitmap, None)
self._colorSpace = ColorSpace.BGR
elif (type(source) == type(str()) or source.__class__.__name__ == 'StringIO'):
if source == '':
raise IOError("No filename provided to Image constructor")
elif webp or source.split('.')[-1] == 'webp':
try:
if source.__class__.__name__ == 'StringIO':
source.seek(0) # set the StringIO to the beginning
self._pil = pil.open(source)
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
except:
try:
from webm import decode as webmDecode
except ImportError:
logger.warning('The webm module or latest PIL / PILLOW module needs to be installed to load webp files: https://github.com/sightmachine/python-webm')
return
WEBP_IMAGE_DATA = bytearray(file(source, "rb").read())
result = webmDecode.DecodeRGB(WEBP_IMAGE_DATA)
webpImage = pil.frombuffer(
"RGB", (result.width, result.height), str(result.bitmap),
"raw", "RGB", 0, 1
)
self._pil = webpImage.convert("RGB")
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
self.filename = source
cv.SetData(self._bitmap, self._pil.tostring())
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
else:
self.filename = source
try:
self._bitmap = cv.LoadImage(self.filename, iscolor=cv.CV_LOAD_IMAGE_COLOR)
except:
self._pil = pil.open(self.filename).convert("RGB")
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, self._pil.tostring())
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
#TODO, on IOError fall back to PIL
self._colorSpace = ColorSpace.BGR
elif (type(source) == pg.Surface):
self._pgsurface = source
self._bitmap = cv.CreateImageHeader(self._pgsurface.get_size(), cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, pg.image.tostring(self._pgsurface, "RGB"))
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
self._colorSpace = ColorSpace.BGR
elif (PIL_ENABLED and (
(len(source.__class__.__bases__) and source.__class__.__bases__[0].__name__ == "ImageFile")
or source.__class__.__name__ == "JpegImageFile"
or source.__class__.__name__ == "WebPPImageFile"
or source.__class__.__name__ == "Image")):
if source.mode != 'RGB':
source = source.convert('RGB')
self._pil = source
#from the opencv cookbook
#http://opencv.willowgarage.com/documentation/python/cookbook.html
self._bitmap = cv.CreateImageHeader(self._pil.size, cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, self._pil.tostring())
self._colorSpace = ColorSpace.BGR
cv.CvtColor(self._bitmap, self._bitmap, cv.CV_RGB2BGR)
#self._bitmap = cv.iplimage(self._bitmap)
else:
return None
#if the caller passes in a colorspace we override it
if(colorSpace != ColorSpace.UNKNOWN):
self._colorSpace = colorSpace
bm = self.getBitmap()
self.width = bm.width
self.height = bm.height
self.depth = bm.depth
def __del__(self):
"""
This is called when the instance is about to be destroyed, i.e. it acts as a destructor.
"""
try :
for i in self._tempFiles:
if (i[1]):
os.remove(i[0])
except :
pass
def getEXIFData(self):
"""
**SUMMARY**
This function extracts the exif data from an image file like JPEG or TIFF. The data is returned as a dict.
**RETURNS**
A dictionary of key value pairs. The value pairs are defined in the EXIF.py file.
**EXAMPLE**
>>> img = Image("./SimpleCV/sampleimages/OWS.jpg")
>>> data = img.getEXIFData()
>>> data['Image GPSInfo'].values
**NOTES**
* Compliments of: http://exif-py.sourceforge.net/
* See also: http://en.wikipedia.org/wiki/Exchangeable_image_file_format
**See Also**
:py:class:`EXIF`
"""
import os, string
if( self.filename is None or len(self.filename) < 5 ):
#do not warn here; it would be noisy for image sets
#logger.warning("ImageClass.getEXIFData: This image did not come from a file, can't get EXIF data.")
return {}
fileName, fileExtension = os.path.splitext(self.filename)
fileExtension = string.lower(fileExtension)
if( fileExtension != '.jpeg' and fileExtension != '.jpg' and
fileExtension != '.tiff' and fileExtension != '.tif'):
#logger.warning("ImageClass.getEXIFData: This image format does not support EXIF")
return {}
raw = open(self.filename,'rb')
data = process_file(raw)
return data
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
* Left click will show mouse coordinates and color.
* Right click will kill the live image.
**RETURNS**
Nothing. In place method.
**EXAMPLE**
>>> cam = Camera()
>>> cam.live()
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self
i.clearLayers()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
def getColorSpace(self):
"""
**SUMMARY**
Returns the value matched in the color space class
**RETURNS**
Integer corresponding to the color space.
**EXAMPLE**
>>> if(image.getColorSpace() == ColorSpace.RGB):
**SEE ALSO**
:py:class:`ColorSpace`
"""
return self._colorSpace
def isRGB(self):
"""
**SUMMARY**
Returns true if this image uses the RGB colorspace.
**RETURNS**
True if the image uses the RGB colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isRGB() ):
>>> r,g,b = img.splitChannels()
**SEE ALSO**
:py:meth:`toRGB`
"""
return(self._colorSpace==ColorSpace.RGB)
def isBGR(self):
"""
**SUMMARY**
Returns true if this image uses the BGR colorspace.
**RETURNS**
True if the image uses the BGR colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isBGR() ):
>>> b,g,r = img.splitChannels()
**SEE ALSO**
:py:meth:`toBGR`
"""
return(self._colorSpace==ColorSpace.BGR)
def isHSV(self):
"""
**SUMMARY**
Returns true if this image uses the HSV colorspace.
**RETURNS**
True if the image uses the HSV colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHSV() ):
>>> h,s,v = img.splitChannels()
**SEE ALSO**
:py:meth:`toHSV`
"""
return(self._colorSpace==ColorSpace.HSV)
def isHLS(self):
"""
**SUMMARY**
Returns true if this image uses the HLS colorspace.
**RETURNS**
True if the image uses the HLS colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHLS() ):
>>> h,l,s = img.splitChannels()
**SEE ALSO**
:py:meth:`toHLS`
"""
return(self._colorSpace==ColorSpace.HLS)
def isXYZ(self):
"""
**SUMMARY**
Returns true if this image uses the XYZ colorspace.
**RETURNS**
True if the image uses the XYZ colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isXYZ() ):
>>> x,y,z = img.splitChannels()
**SEE ALSO**
:py:meth:`toXYZ`
"""
return(self._colorSpace==ColorSpace.XYZ)
def isGray(self):
"""
**SUMMARY**
Returns true if this image uses the Gray colorspace.
**RETURNS**
True if the image uses the Gray colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isGray() ):
>>> print "The image is in Grayscale."
**SEE ALSO**
:py:meth:`toGray`
"""
return(self._colorSpace==ColorSpace.GRAY)
def isYCrCb(self):
"""
**SUMMARY**
Returns true if this image uses the YCrCb colorspace.
**RETURNS**
True if the image uses the YCrCb colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isYCrCb() ):
>>> Y,Cr,Cb = img.splitChannels()
**SEE ALSO**
:py:meth:`toYCrCb`
"""
return(self._colorSpace==ColorSpace.YCrCb)
def toRGB(self):
"""
**SUMMARY**
This method attempts to convert the image to the RGB colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> RGBImg = img.toRGB()
**SEE ALSO**
:py:meth:`isRGB`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2RGB)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
elif( self._colorSpace == ColorSpace.RGB ):
retVal = self.getBitmap()
else:
logger.warning("Image.toRGB: There is no supported conversion to RGB colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.RGB )
def toBGR(self):
"""
**SUMMARY**
This method attempts to convert the image to the BGR colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> BGRImg = img.toBGR()
**SEE ALSO**
:py:meth:`isBGR`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.RGB or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2BGR)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2BGR)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2BGR)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2BGR)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2BGR)
elif( self._colorSpace == ColorSpace.BGR ):
retVal = self.getBitmap()
else:
logger.warning("Image.toBGR: There is no supported conversion to BGR colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.BGR )
def toHLS(self):
"""
**SUMMARY**
This method attempts to convert the image to the HLS colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HLSImg = img.toHLS()
**SEE ALSO**
:py:meth:`isHLS`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HLS)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HLS ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSL: There is no supported conversion to HSL colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HLS )
def toHSV(self):
"""
**SUMMARY**
This method attempts to convert the image to the HSV colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HSVImg = img.toHSV()
**SEE ALSO**
:py:meth:`isHSV`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HSV)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HSV ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSV: There is no supported conversion to HSV colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HSV )
def toXYZ(self):
"""
**SUMMARY**
This method attempts to convert the image to the XYZ colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> XYZImg = img.toXYZ()
**SEE ALSO**
:py:meth:`isXYZ`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2XYZ)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2XYZ)
elif( self._colorSpace == ColorSpace.XYZ ):
retVal = self.getBitmap()
else:
logger.warning("Image.toXYZ: There is no supported conversion to XYZ colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.XYZ )
def toGray(self):
"""
**SUMMARY**
This method attempts to convert the image to the grayscale colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
A grayscale SimpleCV image if successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.toGray().binarize().show()
**SEE ALSO**
:py:meth:`isGray`
:py:meth:`binarize`
"""
retVal = self.getEmpty(1)
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2GRAY)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.GRAY ):
retVal = self.getBitmap()
else:
logger.warning("Image.toGray: There is no supported conversion to gray colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.GRAY )
def toYCrCb(self):
"""
**SUMMARY**
This method attempts to convert the image to the YCrCb colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> YCrCbImg = img.toYCrCb()
**SEE ALSO**
:py:meth:`isYCrCb`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2YCrCb)
elif( self._colorSpace == ColorSpace.RGB ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2YCrCb)
elif( self._colorSpace == ColorSpace.YCrCb ):
retVal = self.getBitmap()
else:
logger.warning("Image.toYCrCb: There is no supported conversion to YCrCb colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.YCrCb )
def getEmpty(self, channels=3):
"""
**SUMMARY**
Create a new, empty OpenCV bitmap with the specified number of channels (default 3).
This method basically creates an empty copy of the image. This is handy for
interfacing with OpenCV functions directly.
**PARAMETERS**
* *channels* - The number of channels in the returned OpenCV image.
**RETURNS**
Returns a black OpenCV IplImage that matches the width, height, and color
depth of the source image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getEmpty()
>>> cv.SomeOpenCVFunc(img.getBitmap(),rawImg)
**SEE ALSO**
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
bitmap = cv.CreateImage(self.size(), cv.IPL_DEPTH_8U, channels)
cv.SetZero(bitmap)
return bitmap
def getBitmap(self):
"""
**SUMMARY**
Retrieve the bitmap (iplImage) of the Image. This is useful if you want
to use functions from OpenCV with SimpleCV's image class
**RETURNS**
Returns the OpenCV IplImage representation of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getBitmap()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (self._bitmap):
return self._bitmap
elif (self._matrix):
self._bitmap = cv.GetImage(self._matrix)
return self._bitmap
def getMatrix(self):
"""
**SUMMARY**
Get the matrix (cvMat) version of the image, required for some OpenCV algorithms.
**RETURNS**
Returns the OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (self._matrix):
return self._matrix
else:
self._matrix = cv.GetMat(self.getBitmap()) #convert the bitmap to a matrix
return self._matrix
def getFPMatrix(self):
"""
**SUMMARY**
Converts the standard int bitmap to a floating point bitmap.
This is handy for some OpenCV functions.
**RETURNS**
Returns the floating point OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getFPMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
retVal = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_32F, 3)
cv.Convert(self.getBitmap(),retVal)
return retVal
def getPIL(self):
"""
**SUMMARY**
Get a PIL Image object for use with the Python Image Library
This is handy for some PIL functions.
**RETURNS**
Returns the Python Imaging Library (PIL) version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getPIL()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getFPMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if (not PIL_ENABLED):
return None
if (not self._pil):
rgbbitmap = self.getEmpty()
cv.CvtColor(self.getBitmap(), rgbbitmap, cv.CV_BGR2RGB)
self._pil = pil.fromstring("RGB", self.size(), rgbbitmap.tostring())
return self._pil
def getGrayNumpy(self):
"""
**SUMMARY**
Return a grayscale Numpy array of the image.
**RETURNS**
Returns the image, converted first to grayscale and then converted to a 2D numpy array.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayNumpy()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if( self._grayNumpy != "" ):
return self._grayNumpy
else:
self._grayNumpy = uint8(np.array(cv.GetMat(self._getGrayscaleBitmap())).transpose())
return self._grayNumpy
def getNumpy(self):
"""
**SUMMARY**
Get a Numpy array of the image in width x height x RGB dimensions
**RETURNS**
Returns the image converted to a 3D numpy array in width x height x RGB order.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getNumpy()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
"""
if self._numpy != "":
return self._numpy
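# getMatrix() yields a height x width BGR array; the slice below reverses the channel
# order to RGB and the transpose swaps rows and columns, so the cached result is a
# width x height x RGB array as documented above.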
self._numpy = np.array(self.getMatrix())[:, :, ::-1].transpose([1, 0, 2])
return self._numpy
def getNumpyCv2(self):
"""
**SUMMARY**
Get a Numpy array of the image in width x height x RGB dimensions compatible with OpenCV >= 2.3
**RETURNS**
Returns the 3D numpy array of the image compatible with OpenCV >= 2.3
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getNumpyCv2()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpyCv2`
"""
if type(self._cv2Numpy) is not np.ndarray:
self._cv2Numpy = np.array(self.getMatrix())
return self._cv2Numpy
def getGrayNumpyCv2(self):
"""
**SUMMARY**
Get a grayscale Numpy array of the image in width x height form, compatible with OpenCV >= 2.3
**RETURNS**
Returns the grayscale numpy array compatible with OpenCV >= 2.3
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayNumpyCv2()
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getMatrix`
:py:meth:`getPIL`
:py:meth:`getGrayNumpy`
:py:meth:`getGrayscaleMatrix`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpyCv2`
"""
if type(self._cv2GrayNumpy) is not np.ndarray:
self._cv2GrayNumpy = np.array(self.getGrayscaleMatrix())
return self._cv2GrayNumpy
def _getGrayscaleBitmap(self):
if (self._graybitmap):
return self._graybitmap
self._graybitmap = self.getEmpty(1)
temp = self.getEmpty(3)
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), self._graybitmap, cv.CV_BGR2GRAY)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_HLS2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_HSV2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), temp, cv.CV_XYZ2RGB)
cv.CvtColor(temp, self._graybitmap, cv.CV_RGB2GRAY)
elif( self._colorSpace == ColorSpace.GRAY):
cv.Split(self.getBitmap(), self._graybitmap, self._graybitmap, self._graybitmap, None)
else:
logger.warning("Image._getGrayscaleBitmap: There is no supported conversion to gray colorspace")
return None
return self._graybitmap
def getGrayscaleMatrix(self):
"""
**SUMMARY**
Get the grayscale matrix (cvMat) version of the image, required for some OpenCV algorithms.
**RETURNS**
Returns the OpenCV CvMat version of this image.
**EXAMPLE**
>>> img = Image("lenna")
>>> rawImg = img.getGrayscaleMatrix()
>>> rawOut = img.getEmpty()
>>> cv.SomeOpenCVFunc(rawImg,rawOut)
**SEE ALSO**
:py:meth:`getEmpty`
:py:meth:`getBitmap`
:py:meth:`getFPMatrix`
:py:meth:`getPIL`
:py:meth:`getNumpy`
:py:meth:`getGrayNumpy`
:py:meth:`getMatrix`
"""
if (self._grayMatrix):
return self._grayMatrix
else:
self._grayMatrix = cv.GetMat(self._getGrayscaleBitmap()) #convert the bitmap to a matrix
return self._grayMatrix
def _getEqualizedGrayscaleBitmap(self):
if (self._equalizedgraybitmap):
return self._equalizedgraybitmap
self._equalizedgraybitmap = self.getEmpty(1)
cv.EqualizeHist(self._getGrayscaleBitmap(), self._equalizedgraybitmap)
return self._equalizedgraybitmap
def equalize(self):
"""
**SUMMARY**
Perform a histogram equalization on the image.
**RETURNS**
Returns a grayscale SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img = img.equalize()
"""
return Image(self._getEqualizedGrayscaleBitmap())
def getPGSurface(self):
"""
**SUMMARY**
Returns the image as a pygame surface. This is used for rendering the display
**RETURNS**
A pygame surface object used for rendering.
"""
if (self._pgsurface):
return self._pgsurface
else:
if self.isGray():
self._pgsurface = pg.image.fromstring(self.getBitmap().tostring(), self.size(), "RGB")
else:
self._pgsurface = pg.image.fromstring(self.toRGB().getBitmap().tostring(), self.size(), "RGB")
return self._pgsurface
def toString(self):
"""
**SUMMARY**
Returns the image as a string, useful for moving data around.
**RETURNS**
The image, converted to rgb, then converted to a string.
"""
return self.toRGB().getBitmap().tostring()
def save(self, filehandle_or_filename="", mode="", verbose=False, temp=False, path=None, filename=None, cleanTemp=False ,**params):
"""
**SUMMARY**
Save the image to the specified filename. If no filename is provided
then it will use the filename the Image was loaded from or the last
place it was saved to. You can save to lots of places, not just files.
For example you can save to the Display, a JpegStream, VideoStream,
temporary file, or Ipython Notebook.
Save will implicitly render the image's layers before saving, but the layers are
not applied to the Image itself.
**PARAMETERS**
* *filehandle_or_filename* - the filename to which to store the file. The method will infer the file type.
* *mode* - This flag is used for saving using PIL.
* *verbose* - If this flag is true we return the path where we saved the file.
* *temp* - If temp is True we save the image as a temporary file and return the path.
* *path* - The path where temporary files should be stored.
* *filename* - The name (prefix) of the temporary file.
* *cleanTemp* - Set this flag to True if the temporary files should be deleted once the object is destroyed.
* *params* - This object is used for overloading the PIL save methods. In particular
this method is useful for setting the jpeg compression level. For JPG see this documentation:
http://www.pythonware.com/library/pil/handbook/format-jpeg.htm
**EXAMPLES**
To save as a temporary file just use:
>>> img = Image('simplecv')
>>> img.save(temp=True)
It will return the path that it saved to.
Save also supports IPython Notebooks when passing it a Display object
that has been instantiated with the notebook flag.
To do this just use::
>>> disp = Display(displaytype='notebook')
>>> img.save(disp)
.. Note::
You must have IPython notebooks installed for this to work. The path and filename arguments are only valid if temp is set to True.
.. attention::
We need examples for all save methods as they are unintuitive.
"""
#TODO, we use the term mode here when we mean format
#TODO, if any params are passed, use PIL
if temp :
import glob
if filename == None :
filename = 'Image'
if path == None :
path=tempfile.gettempdir()
if glob.os.path.exists(path):
path = glob.os.path.abspath(path)
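# Temp-file naming: glob for existing "<filename>NNNNNNN.png" files in the path, take
# the highest trailing number among them, and save the new image under that number + 1,
# zero-padded to seven digits.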
imagefiles = glob.glob(glob.os.path.join(path,filename+"*.png"))
num = [0]
for img in imagefiles :
num.append(int(glob.re.findall('[0-9]+$',img[:-4])[-1]))
num.sort()
fnum = num[-1]+1
filename = glob.os.path.join(path,filename+("%07d" % fnum)+".png")
self._tempFiles.append((filename,cleanTemp))
self.save(self._tempFiles[-1][0])
return self._tempFiles[-1][0]
else :
print "Path does not exist!"
else :
if (filename) :
filehandle_or_filename = filename + ".png"
if (not filehandle_or_filename):
if (self.filename):
filehandle_or_filename = self.filename
else:
filehandle_or_filename = self.filehandle
if (len(self._mLayers)):
saveimg = self.applyLayers()
else:
saveimg = self
if self._colorSpace != ColorSpace.BGR and self._colorSpace != ColorSpace.GRAY:
saveimg = saveimg.toBGR()
if not isinstance(filehandle_or_filename, basestring):
fh = filehandle_or_filename
if (not PIL_ENABLED):
logger.warning("You need the python image library to save by filehandle")
return 0
if (type(fh) == InstanceType and fh.__class__.__name__ == "JpegStreamer"):
fh.jpgdata = StringIO()
saveimg.getPIL().save(fh.jpgdata, "jpeg", **params) #save via PIL to a StringIO handle
fh.refreshtime = time.time()
self.filename = ""
self.filehandle = fh
elif (type(fh) == InstanceType and fh.__class__.__name__ == "VideoStream"):
self.filename = ""
self.filehandle = fh
fh.writeFrame(saveimg)
elif (type(fh) == InstanceType and fh.__class__.__name__ == "Display"):
if fh.displaytype == 'notebook':
try:
from IPython.core.display import Image as IPImage
except ImportError:
print "You need IPython Notebooks to use this display mode"
return
from IPython.core import display as Idisplay
tf = tempfile.NamedTemporaryFile(suffix=".png")
loc = tf.name
tf.close()
self.save(loc)
Idisplay.display(IPImage(filename=loc))
return
else:
#self.filename = ""
self.filehandle = fh
fh.writeFrame(saveimg)
else:
if (not mode):
mode = "jpeg"
try:
saveimg.getPIL().save(fh, mode, **params) # The latest version of PIL / PILLOW supports webp, try this first, if not gracefully fallback
self.filehandle = fh #set the filename for future save operations
self.filename = ""
return 1
except Exception, e:
if mode.lower() != 'webp':
raise e
if verbose:
print self.filename
if not mode.lower() == 'webp':
return 1
#make a temporary file location if there isn't one
if not filehandle_or_filename:
filename = tempfile.mkstemp(suffix=".png")[-1]
else:
filename = filehandle_or_filename
#allow saving in webp format
if mode == 'webp' or re.search('\.webp$', filename):
try:
#newer versions of PIL support webp format, try that first
self.getPIL().save(filename, **params)
except:
#if PIL doesn't support it, maybe we have the python-webm library
try:
from webm import encode as webmEncode
from webm.handlers import BitmapHandler, WebPHandler
except:
logger.warning('You need the webm library to save to webp format. You can download from: https://github.com/sightmachine/python-webm')
return 0
#PNG_BITMAP_DATA = bytearray(Image.open(PNG_IMAGE_FILE).tostring())
PNG_BITMAP_DATA = bytearray(self.toString())
IMAGE_WIDTH = self.width
IMAGE_HEIGHT = self.height
image = BitmapHandler(
PNG_BITMAP_DATA, BitmapHandler.RGB,
IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_WIDTH * 3
)
result = webmEncode.EncodeRGB(image)
if filehandle_or_filename.__class__.__name__ == 'StringIO':
filehandle_or_filename.write(result.data)
else:
file(filename.format("RGB"), "wb").write(result.data)
return 1
#if the user is passing kwargs use the PIL save method.
if( params ): #usually this is just the compression rate for the image
if (not mode):
mode = "jpeg"
saveimg.getPIL().save(filename, mode, **params)
return 1
if (filename):
cv.SaveImage(filename, saveimg.getBitmap())
self.filename = filename #set the filename for future save operations
self.filehandle = ""
elif (self.filename):
cv.SaveImage(self.filename, saveimg.getBitmap())
else:
return 0
if verbose:
print self.filename
if temp:
return filename
else:
return 1
def copy(self):
"""
**SUMMARY**
Return a full copy of the Image's bitmap. Note that this is different
from using python's implicit copy function in that only the bitmap itself
is copied. This method essentially performs a deep copy.
**RETURNS**
A copy of this SimpleCV image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.copy()
"""
newimg = self.getEmpty()
cv.Copy(self.getBitmap(), newimg)
return Image(newimg, colorSpace=self._colorSpace)
def upload(self,dest,api_key=None,api_secret=None, verbose = True):
"""
**SUMMARY**
Uploads the image to imgur, flickr, or dropbox. In verbose mode URL values are printed.
**PARAMETERS**
* *api_key* - a string of the API key.
* *api_secret* (required only for flickr and dropbox ) - a string of the API secret.
* *verbose* - If verbose is true all values are printed to the screen
**RETURNS**
If uploading is successful:
- Imgur returns the original image URL on success and None if it fails.
- Flickr returns True on success, else returns False.
- Dropbox returns True on success.
**EXAMPLE**
To upload an image to imgur::
>>> img = Image("lenna")
>>> result = img.upload( 'imgur',"MY_API_KEY1234567890" )
>>> print "Uploaded To: " + result[0]
To upload image to flickr::
>>> img.upload('flickr','api_key','api_secret')
>>> img.invert().upload('flickr') #Once the api keys and secret keys are cached.
To upload image to dropbox::
>>> img.upload('dropbox','api_key','api_secret')
>>> img.invert().upload('dropbox') #Once the api keys and secret keys are cached.
**NOTES**
.. Warning::
This method requires one of the following packages to be installed, depending on the destination:
- PyCurl (imgur)
- flickrapi (flickr)
- dropbox (dropbox)
.. Warning::
You must supply your own API key. See here:
- http://imgur.com/register/api_anon
- http://www.flickr.com/services/api/misc.api_keys.html
- https://www.dropbox.com/developers/start/setup#python
"""
if ( dest=='imgur' ) :
try:
import pycurl
except ImportError:
print "PycURL Library not installed."
return
response = StringIO()
c = pycurl.Curl()
values = [("key", api_key),
("image", (c.FORM_FILE, self.filename))]
c.setopt(c.URL, "http://api.imgur.com/2/upload.xml")
c.setopt(c.HTTPPOST, values)
c.setopt(c.WRITEFUNCTION, response.write)
c.perform()
c.close()
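# Parse imgur's XML response: group(1) is the image hash, group(2) the delete hash,
# and group(3) the URL of the original image.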
match = re.search(r'<hash>(\w+).*?<deletehash>(\w+).*?<original>(http://[\w.]+/[\w.]+)', response.getvalue() , re.DOTALL)
if match:
if(verbose):
print "Imgur page: http://imgur.com/" + match.group(1)
print "Original image: " + match.group(3)
print "Delete page: http://imgur.com/delete/" + match.group(2)
return [match.group(1),match.group(3),match.group(2)]
else :
if(verbose):
print "The API Key given is not valid"
return None
elif (dest=='flickr'):
global temp_token
flickr = None
try :
import flickrapi
except ImportError:
print "Flickr API is not installed. Please install it from http://pypi.python.org/pypi/flickrapi"
return False
try :
if (not(api_key==None and api_secret==None)):
self.flickr = flickrapi.FlickrAPI(api_key,api_secret,cache=True)
self.flickr.cache = flickrapi.SimpleCache(timeout=3600, max_entries=200)
self.flickr.authenticate_console('write')
temp_token = (api_key,api_secret)
else :
try :
self.flickr = flickrapi.FlickrAPI(temp_token[0],temp_token[1],cache=True)
self.flickr.authenticate_console('write')
except NameError :
print "API key and Secret key are not set."
return
except :
print "The API Key and Secret Key are not valid"
return False
if (self.filename) :
try :
self.flickr.upload(self.filename,self.filehandle)
except :
print "Uploading Failed !"
return False
else :
tf = self.save(temp=True)
self.flickr.upload(tf,"Image")
return True
elif (dest=='dropbox'):
global dropbox_token
access_type = 'dropbox'
try :
from dropbox import client, rest, session
import webbrowser
except ImportError:
print "Dropbox API is not installed. For more info refer : https://www.dropbox.com/developers/start/setup#python "
return False
try :
if ( 'dropbox_token' not in globals() and api_key!=None and api_secret!=None ):
sess = session.DropboxSession(api_key, api_secret, access_type)
request_token = sess.obtain_request_token()
url = sess.build_authorize_url(request_token)
webbrowser.open(url)
print "Please visit this website and press the 'Allow' button, then hit 'Enter' here."
raw_input()
access_token = sess.obtain_access_token(request_token)
dropbox_token = client.DropboxClient(sess)
else :
if (dropbox_token) :
pass
else :
return None
except :
print "The API Key and Secret Key are not valid"
return False
if (self.filename) :
try :
f = open(self.filename)
dropbox_token.put_file('/SimpleCVImages/'+os.path.split(self.filename)[-1], f)
except :
print "Uploading Failed !"
return False
else :
tf = self.save(temp=True)
f = open(tf)
dropbox_token.put_file('/SimpleCVImages/'+'Image', f)
return True
def scale(self, width, height = -1, interpolation=cv2.INTER_LINEAR):
"""
**SUMMARY**
Scale the image to a new width and height.
If no height is provided, the width is considered a scaling value.
**PARAMETERS**
* *width* - either the new width in pixels, if the height parameter is > 0, or if this value
is a floating point value, this is the scaling factor.
* *height* - the new height in pixels.
* *interpolation* - how to generate new pixels that don't match the original pixels. This argument is passed directly to cv2.resize. See http://docs.opencv.org/modules/imgproc/doc/geometric_transformations.html?highlight=resize#cv2.resize for more details
**RETURNS**
The resized image.
**EXAMPLE**
>>> img.scale(200, 100) #scales the image to 200px x 100px
>>> img.scale(2.0) #enlarges the image to 2x its current size
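A hedged sketch of overriding the interpolation (nearest-neighbour keeps hard edges):
>>> img.scale(0.5, interpolation=cv2.INTER_NEAREST)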
.. Warning::
The two value scale command is deprecated. To set width and height
use the resize function.
:py:meth:`resize`
"""
w, h = width, height
if height == -1:
w = int(self.width * width)
h = int(self.height * width)
if( w > MAX_DIMENSION or h > MAX_DIMENSION or h < 1 or w < 1 ):
logger.warning("Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return self
scaledArray = np.zeros((w,h,3),dtype='uint8')
retVal = cv2.resize(self.getNumpyCv2(), (w,h), interpolation = interpolation)
return Image(retVal, colorSpace=self._colorSpace,cv2image = True)
def resize(self, w=None,h=None):
"""
**SUMMARY**
This method resizes an image based on a width, a height, or both.
If either width or height is not provided the value is inferred by keeping the aspect ratio.
If both values are provided then the image is resized accordingly.
**PARAMETERS**
* *width* - The width of the output image in pixels.
* *height* - The height of the output image in pixels.
**RETURNS**
Returns a resized image; if the size is invalid a warning is issued and
None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.resize(w=1024) # h is guessed from w
>>> img3 = img.resize(h=1024) # w is guessed from h
>>> img4 = img.resize(w=200,h=100)
"""
retVal = None
if( w is None and h is None ):
logger.warning("Image.resize has no parameters. No operation is performed")
return None
elif( w is not None and h is None):
sfactor = float(w)/float(self.width)
h = int( sfactor*float(self.height) )
elif( w is None and h is not None):
sfactor = float(h)/float(self.height)
w = int( sfactor*float(self.width) )
if( w > MAX_DIMENSION or h > MAX_DIMENSION ):
logger.warning("Image.resize Holy Heck! You tried to make an image really big or impossibly small. I can't scale that")
return retVal
scaled_bitmap = cv.CreateImage((w, h), 8, 3)
cv.Resize(self.getBitmap(), scaled_bitmap)
return Image(scaled_bitmap, colorSpace=self._colorSpace)
def smooth(self, algorithm_name='gaussian', aperture=(3,3), sigma=0, spatial_sigma=0, grayscale=False, aperature=None):
"""
**SUMMARY**
Smooth the image, by default with the Gaussian blur. If desired,
additional algorithms and apertures can be specified. Optional parameters
are passed directly to OpenCV's cv.Smooth() function.
If grayscale is true the smoothing operation is only performed on a single channel
otherwise the operation is performed on each channel of the image.
for OpenCV versions >= 2.3.0 it is advisable to take a look at
- :py:meth:`bilateralFilter`
- :py:meth:`medianFilter`
- :py:meth:`blur`
- :py:meth:`gaussianBlur`
**PARAMETERS**
* *algorithm_name* - valid options are 'blur', 'gaussian', 'bilateral', and 'median'.
* `Median Filter <http://en.wikipedia.org/wiki/Median_filter>`_
* `Gaussian Blur <http://en.wikipedia.org/wiki/Gaussian_blur>`_
* `Bilateral Filter <http://en.wikipedia.org/wiki/Bilateral_filter>`_
* *aperture* - A tuple for the aperture of the gaussian blur as an (x,y) tuple.
- Note: there were rampant spelling mistakes in both smooth & sobel;
aperture is spelled as such, and not "aperature". This code is backwards
compatible.
.. Warning::
These must be odd numbers.
* *sigma* -
* *spatial_sigma* -
* *grayscale* - Return just the grayscale image.
**RETURNS**
The smoothed image.
**EXAMPLE**
>>> img = Image("Lenna")
>>> img2 = img.smooth()
>>> img3 = img.smooth('median')
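A hedged sketch with an explicit aperture (both values must be odd):
>>> img4 = img.smooth('gaussian', aperture=(5,5))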
**SEE ALSO**
:py:meth:`bilateralFilter`
:py:meth:`medianFilter`
:py:meth:`blur`
"""
# see comment on argument documentation (spelling error)
aperture = aperature if aperature else aperture
if is_tuple(aperture):
win_x, win_y = aperture
if win_x <= 0 or win_y <= 0 or win_x % 2 == 0 or win_y % 2 == 0:
logger.warning("The aperture (x,y) must be odd number and greater than 0.")
return None
else:
raise ValueError("Please provide a tuple to aperture, got: %s" % type(aperture))
#gauss and blur can work in-place, others need a buffer frame
#use a string to ID rather than the openCV constant
if algorithm_name == "blur":
algorithm = cv.CV_BLUR
elif algorithm_name == "bilateral":
algorithm = cv.CV_BILATERAL
win_y = win_x #aperture must be square
elif algorithm_name == "median":
algorithm = cv.CV_MEDIAN
win_y = win_x #aperture must be square
else:
algorithm = cv.CV_GAUSSIAN #default algorithm is gaussian
if grayscale:
newimg = self.getEmpty(1)
cv.Smooth(self._getGrayscaleBitmap(), newimg, algorithm, win_x, win_y, sigma, spatial_sigma)
else:
newimg = self.getEmpty(3)
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
ro = self.getEmpty(1)
go = self.getEmpty(1)
bo = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
cv.Smooth(r, ro, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Smooth(g, go, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Smooth(b, bo, algorithm, win_x, win_y, sigma, spatial_sigma)
cv.Merge(bo,go,ro, None, newimg)
return Image(newimg, colorSpace=self._colorSpace)
def medianFilter(self, window='',grayscale=False):
"""
**SUMMARY**
Smooths the image with a median filter. Performs a median filtering operation to denoise/despeckle the image.
The optional parameter is the window size.
see : http://en.wikipedia.org/wiki/Median_filter
**Parameters**
* *window* - should be in the form of a tuple (win_x,win_y), where win_x should be equal to win_y. By default it is set to 3x3, i.e. window = (3,3).
**Note**
win_x and win_y should be greater than zero, odd, and equal.
For OpenCV versions <= 2.3.0
this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth.
For OpenCV versions >= 2.3.0
cv2.medianBlur function is called.
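**EXAMPLE**
A minimal sketch (the window values are assumed to be odd):
>>> img = Image("lenna")
>>> despeckled = img.medianFilter((5,5))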
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
if win_x != win_y :
win_x=win_y
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif( is_number(window) ):
win_x = win_y = window
else :
win_x = win_y = 3 #set the default aperture window size (3x3)
if ( not new_version ) :
grayscale_ = grayscale
return self.smooth(algorithm_name='median', aperture=(win_x,win_y),grayscale=grayscale_)
else :
if (grayscale) :
img_medianBlur = cv2.medianBlur(self.getGrayNumpy(),win_x)
return Image(img_medianBlur, colorSpace=ColorSpace.GRAY)
else :
img_medianBlur = cv2.medianBlur(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),win_x)
img_medianBlur = img_medianBlur[:,:, ::-1].transpose([1,0,2])
return Image(img_medianBlur, colorSpace=self._colorSpace)
def bilateralFilter(self, diameter=5,sigmaColor=10, sigmaSpace=10,grayscale=False):
"""
**SUMMARY**
Smooths the image using bilateral filtering. The main potential of bilateral filtering is the removal of texture while preserving edges.
The optional parameters are diameter, sigmaColor, and sigmaSpace.
Bilateral Filter
see : http://en.wikipedia.org/wiki/Bilateral_filter
see : http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
**Parameters**
* *diameter* - A tuple for the window of the form (diameter,diameter). By default window = (3,3). ( for OpenCV versions <= 2.3.0)
- Diameter of each pixel neighborhood that is used during filtering. ( for OpenCV versions >= 2.3.0)
* *sigmaColor* - Filter the specified value in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in larger areas of semi-equal color.
* *sigmaSpace* - Filter the specified value in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth.
-- where aperture(window) is (diameter,diameter)
-- sigmaColor and sigmaSpace become obsolete
For OpenCV versions higher than 2.3.0. i.e >= 2.3.0
-- cv.bilateralFilter function is called
-- If the sigmaColor and sigmaSpace values are small (< 10), the filter will not have much effect, whereas if they are large (> 150), they will have a very strong effect, making the image look 'cartoonish'
-- It is recommended to use diameter=5 for real-time applications, and perhaps diameter=9 for offline applications that need heavy noise filtering.
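**EXAMPLE**
A minimal sketch (parameter values chosen only for illustration):
>>> img = Image("lenna")
>>> smoothed = img.bilateralFilter(diameter=5, sigmaColor=75, sigmaSpace=75)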
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(diameter):
win_x, win_y = diameter
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
if win_x != win_y :
diameter = (win_x, win_y)
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif( is_number(diameter) ):
pass
else :
win_x = 3 #set the default aperture window size (3x3)
diameter = (win_x,win_x)
if ( not new_version ) :
grayscale_ = grayscale
if( is_number(diameter) ) :
diameter = (diameter,diameter)
return self.smooth(algorithm_name='bilateral', aperture=diameter,grayscale=grayscale_)
else :
if (grayscale) :
img_bilateral = cv2.bilateralFilter(self.getGrayNumpy(),diameter,sigmaColor, sigmaSpace)
return Image(img_bilateral, colorSpace=ColorSpace.GRAY)
else :
img_bilateral = cv2.bilateralFilter(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),diameter,sigmaColor, sigmaSpace)
img_bilateral = img_bilateral[:,:, ::-1].transpose([1,0,2])
return Image(img_bilateral,colorSpace=self._colorSpace)
def blur(self, window = '', grayscale=False):
"""
**SUMMARY**
Smoothes an image using the normalized box filter.
The optional parameter is window.
see : http://en.wikipedia.org/wiki/Blur
**Parameters**
* *window* - should be in the form a tuple (win_x,win_y).
- By default it is set to 3x3, i.e window = (3x3).
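**EXAMPLE**
Illustrative usage (the window size below is arbitrary):
>>> img = Image("lenna")
>>> blurred = img.blur(window=(5,5))
>>> blurred.show()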
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth
For OpenCV versions higher than 2.3.0. i.e >= 2.3.0
-- cv.blur function is called
"""
try:
import cv2
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x<=0 or win_y<=0 ) :
logger.warning("win_x and win_y should be greater than 0.")
return None
elif( is_number(window) ):
window = (window,window)
else :
window = (3,3)
if ( not new_version ) :
grayscale_ = grayscale
return self.smooth(algorithm_name='blur', aperture=window, grayscale=grayscale_)
else :
if grayscale:
img_blur = cv2.blur(self.getGrayNumpy(),window)
return Image(img_blur,colorSpace=ColorSpace.GRAY)
else :
img_blur = cv2.blur(self.getNumpy()[:,:, ::-1].transpose([1,0,2]),window)
img_blur = img_blur[:,:, ::-1].transpose([1,0,2])
return Image(img_blur,colorSpace=self._colorSpace)
def gaussianBlur(self, window = '', sigmaX=0 , sigmaY=0 ,grayscale=False):
"""
**SUMMARY**
Smooths an image with a Gaussian filter, typically used to reduce image noise and detail.
The optional parameter is window.
see : http://en.wikipedia.org/wiki/Gaussian_blur
**Parameters**
* *window* - should be in the form a tuple (win_x,win_y). Where win_x and win_y should be positive and odd.
- By default it is set to 3x3, i.e window = (3x3).
* *sigmaX* - Gaussian kernel standard deviation in X direction.
* *sigmaY* - Gaussian kernel standard deviation in Y direction.
* *grayscale* - If true, the effect is applied on grayscale images.
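**EXAMPLE**
Illustrative usage (the window and sigma values below are arbitrary):
>>> img = Image("lenna")
>>> smooth = img.gaussianBlur(window=(5,5),sigmaX=1.5,sigmaY=1.5)
>>> smooth.show()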
**NOTE**
For OpenCV versions <= 2.3.0
-- this acts as a convenience function derived from the :py:meth:`smooth` method, which internally calls cv.Smooth
For OpenCV versions higher than 2.3.0. i.e >= 2.3.0
-- cv.GaussianBlur function is called
"""
try:
import cv2
ver = cv2.__version__
new_version = False
#For OpenCV versions till 2.4.0, cv2.__versions__ are of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:'):
if int(ver.replace('.','0'))>=20300 :
new_version = True
except :
new_version = False
pass
if is_tuple(window):
win_x, win_y = window
if ( win_x>=0 and win_y>=0 and win_x%2==1 and win_y%2==1 ) :
pass
else :
logger.warning("The aperture (win_x,win_y) must be odd number and greater than 0.")
return None
elif (is_number(window)):
window = (window, window)
else:
window = (3,3) #set the default aperture window size (3x3)
if (not new_version):
grayscale_ = grayscale
return self.smooth(algorithm_name='gaussian', aperture=window, grayscale=grayscale_)
else:
image_gauss = cv2.GaussianBlur(self.getNumpyCv2(), window, sigmaX, sigmaY=sigmaY)
if grayscale:
return Image(image_gauss, colorSpace=ColorSpace.GRAY, cv2image=True)
else:
return Image(image_gauss, colorSpace=self._colorSpace, cv2image=True)
def invert(self):
"""
**SUMMARY**
Invert (negative) the image. Note that this can also be done with the
unary minus (-) operator. For a binary image this turns black into white and white into black (i.e. white is the new black).
**RETURNS**
The opposite of the current image.
**EXAMPLE**
>>> img = Image("polar_bear_in_the_snow.png")
>>> img.invert().save("black_bear_at_night.png")
**SEE ALSO**
:py:meth:`binarize`
"""
return -self
def grayscale(self):
"""
**SUMMARY**
This method returns a gray scale version of the image. It makes everything look like an old movie.
**RETURNS**
A grayscale SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.grayscale().binarize().show()
**SEE ALSO**
:py:meth:`binarize`
"""
return Image(self._getGrayscaleBitmap(), colorSpace = ColorSpace.GRAY)
def flipHorizontal(self):
"""
**SUMMARY**
Horizontally mirror an image.
.. Warning::
Note that flip does not mean rotate 180 degrees! The two are different.
**RETURNS**
The flipped SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> upsidedown = img.flipHorizontal()
**SEE ALSO**
:py:meth:`flipVertical`
:py:meth:`rotate`
"""
newimg = self.getEmpty()
cv.Flip(self.getBitmap(), newimg, 1)
return Image(newimg, colorSpace=self._colorSpace)
def flipVertical(self):
"""
**SUMMARY**
Vertically mirror an image.
.. Warning::
Note that flip does not mean rotate 180 degrees! The two are different.
**RETURNS**
The flipped SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> upsidedown = img.flipVertical()
**SEE ALSO**
:py:meth:`rotate`
:py:meth:`flipHorizontal`
"""
newimg = self.getEmpty()
cv.Flip(self.getBitmap(), newimg, 0)
return Image(newimg, colorSpace=self._colorSpace)
def stretch(self, thresh_low = 0, thresh_high = 255):
"""
**SUMMARY**
The stretch filter works on a greyscale image; if the image
is color, it returns a greyscale image. The filter works by
taking in a lower and an upper threshold. Anything below the lower
threshold is pushed to black (0) and anything above the upper
threshold is pushed to white (255).
**PARAMETERS**
* *thresh_low* - The lower threshold for the stretch operation.
This should be a value between 0 and 255.
* *thresh_high* - The upper threshold for the stretch operation.
This should be a value between 0 and 255.
**RETURNS**
A gray scale version of the image with the appropriate histogram stretching.
**EXAMPLE**
>>> img = Image("orson_welles.jpg")
>>> img2 = img.stretch(56,200)
>>> img2.show()
**NOTES**
TODO - make this work on RGB images with thresholds for each channel.
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`equalize`
"""
try:
newimg = self.getEmpty(1)
cv.Threshold(self._getGrayscaleBitmap(), newimg, thresh_low, 255, cv.CV_THRESH_TOZERO)
cv.Not(newimg, newimg)
cv.Threshold(newimg, newimg, 255 - thresh_high, 255, cv.CV_THRESH_TOZERO)
cv.Not(newimg, newimg)
return Image(newimg)
except:
return None
def gammaCorrect(self, gamma = 1):
"""
**DESCRIPTION**
Transforms an image according to Gamma Correction also known as
Power Law Transform.
**PARAMETERS**
* *gamma* - A non-negative real number.
**RETURNS**
A Gamma corrected image.
**EXAMPLE**
>>> img = Image('SimpleCV/sampleimages/family_watching_television_1958.jpg')
>>> img.show()
>>> img.gammaCorrect(1.5).show()
>>> img.gammaCorrect(0.7).show()
"""
if gamma < 0:
return "Gamma should be a non-negative real number"
scale = 255.0
src = self.getNumpy()
dst = (((1.0/scale)*src)**gamma)*scale
return Image(dst)
def binarize(self, thresh = -1, maxv = 255, blocksize = 0, p = 5):
"""
**SUMMARY**
Do a binary threshold of the image, changing all values below thresh to maxv
and all values above thresh to black. If a color tuple is provided, each color channel
is thresholded separately.
If thresh is -1 (default), an adaptive method (Otsu's method) is used.
If a blocksize is also specified, an adaptive threshold is applied instead: the threshold
for each blocksize x blocksize region is its local (weighted) mean minus p.
**PARAMETERS**
* *thresh* - the threshold as an integer or an (r,g,b) tuple, where pixels below (darker than) thresh are set to the max value,
and all values above this value are set to black. If this parameter is -1 we use Otsu's method.
* *maxv* - The maximum value for pixels below the threshold. Ordinarily this should be 255 (white)
* *blocksize* - the size of the block used in the adaptive binarize operation.
.. Warning::
This parameter must be an odd number.
* *p* - The offset subtracted from the local mean when the adaptive (blocksize) threshold is used.
**RETURNS**
A binary (two colors, usually black and white) SimpleCV image. This works great for the findBlobs
family of functions.
**EXAMPLE**
Example of a vanilla threshold versus an adaptive threshold:
>>> img = Image("orson_welles.jpg")
>>> b1 = img.binarize(128)
>>> b2 = img.binarize(blocksize=11,p=7)
>>> b3 = b1.sideBySide(b2)
>>> b3.show()
**NOTES**
`Otsu's Method Description <http://en.wikipedia.org/wiki/Otsu's_method>`_
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`findBlobs`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
"""
if is_tuple(thresh):
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
cv.Threshold(r, r, thresh[0], maxv, cv.CV_THRESH_BINARY_INV)
cv.Threshold(g, g, thresh[1], maxv, cv.CV_THRESH_BINARY_INV)
cv.Threshold(b, b, thresh[2], maxv, cv.CV_THRESH_BINARY_INV)
cv.Add(r, g, r)
cv.Add(r, b, r)
return Image(r, colorSpace=self._colorSpace)
elif thresh == -1:
newbitmap = self.getEmpty(1)
if blocksize:
cv.AdaptiveThreshold(self._getGrayscaleBitmap(), newbitmap, maxv,
cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C, cv.CV_THRESH_BINARY_INV, blocksize, p)
else:
cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV + cv.CV_THRESH_OTSU)
return Image(newbitmap, colorSpace=self._colorSpace)
else:
newbitmap = self.getEmpty(1)
#desaturate the image, and apply the new threshold
cv.Threshold(self._getGrayscaleBitmap(), newbitmap, thresh, float(maxv), cv.CV_THRESH_BINARY_INV)
return Image(newbitmap, colorSpace=self._colorSpace)
def meanColor(self, colorSpace = None):
"""
**SUMMARY**
This method finds the average color of all the pixels in the image and returns it as a tuple in the colorspace specified by the user.
If no colorspace is specified, the (B,G,R) colorspace is used by default.
**RETURNS**
A tuple of the average image values. Tuples are in the channel order. *For most images this means the results are (B,G,R).*
**EXAMPLE**
>>> img = Image('lenna')
>>> colors = img.meanColor() # returns tuple in Image's colorspace format.
>>> colors = img.meanColor('BGR') # returns tuple in (B,G,R) format.
>>> colors = img.meanColor('RGB') # returns tuple in (R,G,B) format.
>>> colors = img.meanColor('HSV') # returns tuple in (H,S,V) format.
>>> colors = img.meanColor('XYZ') # returns tuple in (X,Y,Z) format.
>>> colors = img.meanColor('Gray') # returns float of mean intensity.
>>> colors = img.meanColor('YCrCb') # returns tuple in (Y,Cr,Cb) format.
>>> colors = img.meanColor('HLS') # returns tuple in (H,L,S) format.
"""
if colorSpace == None:
return tuple(cv.Avg(self.getBitmap())[0:3])
elif colorSpace == 'BGR':
return tuple(cv.Avg(self.toBGR().getBitmap())[0:3])
elif colorSpace == 'RGB':
return tuple(cv.Avg(self.toRGB().getBitmap())[0:3])
elif colorSpace == 'HSV':
return tuple(cv.Avg(self.toHSV().getBitmap())[0:3])
elif colorSpace == 'XYZ':
return tuple(cv.Avg(self.toXYZ().getBitmap())[0:3])
elif colorSpace == 'Gray':
return (cv.Avg(self._getGrayscaleBitmap())[0])
elif colorSpace == 'YCrCb':
return tuple(cv.Avg(self.toYCrCb().getBitmap())[0:3])
elif colorSpace == 'HLS':
return tuple(cv.Avg(self.toHLS().getBitmap())[0:3])
else:
logger.warning("Image.meanColor: There is no supported conversion to the specified colorspace. Use one of these as argument: 'BGR' , 'RGB' , 'HSV' , 'Gray' , 'XYZ' , 'YCrCb' , 'HLS' .")
return None
def findCorners(self, maxnum = 50, minquality = 0.04, mindistance = 1.0):
"""
**SUMMARY**
This will find corner Feature objects and return them as a FeatureSet
strongest corners first. The parameters give the number of corners to look
for, the minimum quality of the corner feature, and the minimum distance
between corners.
**PARAMETERS**
* *maxnum* - The maximum number of corners to return.
* *minquality* - The minimum quality metric. This should be a number between zero and one.
* *mindistance* - The minimum distance, in pixels, between successive corners.
**RETURNS**
A featureset of :py:class:`Corner` features or None if no corners are found.
**EXAMPLE**
Standard Test:
>>> img = Image("sampleimages/simplecv.png")
>>> corners = img.findCorners()
>>> if corners: True
True
Validation Test:
>>> img = Image("sampleimages/black.png")
>>> corners = img.findCorners()
>>> if not corners: True
True
**SEE ALSO**
:py:class:`Corner`
:py:meth:`findKeypoints`
"""
#initialize buffer frames
eig_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1)
temp_image = cv.CreateImage(cv.GetSize(self.getBitmap()), cv.IPL_DEPTH_32F, 1)
corner_coordinates = cv.GoodFeaturesToTrack(self._getGrayscaleBitmap(), eig_image, temp_image, maxnum, minquality, mindistance, None)
corner_features = []
for (x, y) in corner_coordinates:
corner_features.append(Corner(self, x, y))
return FeatureSet(corner_features)
def findBlobs(self, threshval = -1, minsize=10, maxsize=0, threshblocksize=0, threshconstant=5,appx_level=3):
"""
**SUMMARY**
Find blobs will look for continuous
light regions and return them as Blob features in a FeatureSet. Parameters
specify the binarize filter threshold value, and minimum and maximum size for blobs.
If a threshold value is -1, it will use an adaptive threshold. See binarize() for
more information about thresholding. The threshblocksize and threshconstant
parameters are only used for adaptive threshold.
**PARAMETERS**
* *threshval* - the threshold as an integer or an (r,g,b) tuple, where pixels below (darker than) thresh are set to the max value,
and all values above this value are set to black. If this parameter is -1 we use Otsu's method.
* *minsize* - the minimum size of the blobs, in pixels, of the returned blobs. This helps to filter out noise.
* *maxsize* - the maximum size of the blobs, in pixels, of the returned blobs.
* *threshblocksize* - the size of the block used in the adaptive binarize operation. *TODO - make this match binarize*
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
.. warning::
This parameter must be an odd number.
* *threshconstant* - The difference from the local mean to use for thresholding in Otsu's method. *TODO - make this match binarize*
**RETURNS**
Returns a featureset (basically a list) of :py:class:`blob` features. If no blobs are found this method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> fs = img.findBlobs()
>>> if( fs is not None ):
>>> fs.draw()
**NOTES**
.. Warning::
For blobs that live right on the edge of the image OpenCV reports the position
as being off by one from the true position. E.g. if a blob is at (0,0) OpenCV reports
its position as (1,1). Likewise the width and height for the other corners are reported as
being one less than the true width and height. This is a known bug.
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`binarize`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
:py:meth:`findBlobsFromPalette`
:py:meth:`smartFindBlobs`
"""
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(self.binarize(threshval, 255, threshblocksize, threshconstant).invert(),
self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def findSkintoneBlobs(self, minsize=10, maxsize=0,dilate_iter=1):
"""
**SUMMARY**
Find Skintone blobs will look for continuous
regions of Skintone in a color image and return them as Blob features in a FeatureSet.
Parameters specify the binarize filter threshold value, and minimum and maximum size for
blobs. If a threshold value is -1, it will use an adaptive threshold. See binarize() for
more information about thresholding. The threshblocksize and threshconstant
parameters are only used for adaptive threshold.
**PARAMETERS**
* *minsize* - the minimum size of the blobs, in pixels, of the returned blobs. This helps to filter out noise.
* *maxsize* - the maximum size of the blobs, in pixels, of the returned blobs.
* *dilate_iter* - the number of times to run the dilation operation.
**RETURNS**
Returns a featureset (basically a list) of :py:class:`blob` features. If no blobs are found this method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> fs = img.findSkintoneBlobs()
>>> if( fs is not None ):
>>> fs.draw()
**NOTES**
This is particularly useful for UI-type applications where you want to track a hand or a face.
**SEE ALSO**
:py:meth:`threshold`
:py:meth:`binarize`
:py:meth:`invert`
:py:meth:`dilate`
:py:meth:`erode`
:py:meth:`findBlobsFromPalette`
:py:meth:`smartFindBlobs`
"""
if (maxsize == 0):
maxsize = self.width * self.height
mask = self.getSkintoneMask(dilate_iter)
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(mask, self, minsize = minsize, maxsize = maxsize)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def getSkintoneMask(self, dilate_iter=0):
"""
**SUMMARY**
Find Skintone mask will look for continuous
regions of Skintone in a color image and return a binary mask where the white pixels denote Skintone region.
**PARAMETERS**
* *dilate_iter* - the number of times to run the dilation operation.
**RETURNS**
Returns a binary mask.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.getSkintoneMask()
>>> mask.show()
"""
if( self._colorSpace != ColorSpace.YCrCb ):
YCrCb = self.toYCrCb()
else:
YCrCb = self
Y = np.zeros((256,1), dtype=np.uint8)
Y[5:] = 255
Cr = np.zeros((256,1), dtype=np.uint8)
Cr[140:180] = 255
Cb = np.zeros((256,1), dtype=np.uint8)
Cb[77:135] = 255
Y_img = YCrCb.getEmpty(1)
Cr_img = YCrCb.getEmpty(1)
Cb_img = YCrCb.getEmpty(1)
cv.Split(YCrCb.getBitmap(),Y_img,Cr_img,Cb_img,None)
cv.LUT(Y_img,Y_img,cv.fromarray(Y))
cv.LUT(Cr_img,Cr_img,cv.fromarray(Cr))
cv.LUT(Cb_img,Cb_img,cv.fromarray(Cb))
temp = self.getEmpty()
cv.Merge(Y_img,Cr_img,Cb_img,None,temp)
mask=Image(temp,colorSpace = ColorSpace.YCrCb)
mask = mask.binarize((128,128,128))
mask = mask.toRGB().binarize()
mask = mask.dilate(dilate_iter)
return mask
#this code is based on code that's based on code from
#http://blog.jozilla.net/2008/06/27/fun-with-python-opencv-and-face-detection/
def findHaarFeatures(self, cascade, scale_factor=1.2, min_neighbors=2, use_canny=cv.CV_HAAR_DO_CANNY_PRUNING, min_size=(20,20), max_size=(1000,1000)):
"""
**SUMMARY**
A Haar like feature cascade is a really robust way of finding the location
of a known object. This technique works really well for a few specific applications
like face, pedestrian, and vehicle detection. It is worth noting that this
approach **IS NOT A MAGIC BULLET** . Creating a cascade file requires a large
number of images that have been sorted by a human. If you want to find Haar
Features (useful for face detection among other purposes) this will return
Haar feature objects in a FeatureSet.
For more information, consult the cv.HaarDetectObjects documentation.
To see what features are available run img.listHaarFeatures() or you can
provide your own haarcascade file if you have one available.
Note that the cascade parameter can be either a filename, or a HaarCascade
loaded with cv.Load(), or a SimpleCV HaarCascade object.
**PARAMETERS**
* *cascade* - The Haar Cascade file, this can be either the path to a cascade
file or a HaarCascased SimpleCV object that has already been
loaded.
* *scale_factor* - The scaling factor for subsequent rounds of the Haar cascade
(default 1.2) in terms of a percentage (i.e. 1.2 = 20% increase in size)
* *min_neighbors* - The minimum number of rectangles that makes up an object. Usually
detected faces are clustered around the face; this is the number
of detections in a cluster that we need for a positive detection. Higher
values here should reduce false positives at the cost of potentially missing some detections.
* *use_canny* - Whether or not to use Canny pruning to reject areas with too many edges
(default yes, set to 0 to disable)
* *min_size* - Minimum window size. By default, it is set to the size
of samples the classifier has been trained on ((20,20) for face detection)
* *max_size* - Maximum window size. By default, it is set to the size
of samples the classifier has been trained on ((1000,1000) for face detection)
**RETURNS**
A feature set of HaarFeatures
**EXAMPLE**
>>> faces = HaarCascade("./SimpleCV/Features/HaarCascades/face.xml","myFaces")
>>> cam = Camera()
>>> while True:
>>> f = cam.getImage().findHaarFeatures(faces)
>>> if( f is not None ):
>>> f.show()
**NOTES**
OpenCV Docs:
- http://opencv.willowgarage.com/documentation/python/objdetect_cascade_classification.html
Wikipedia:
- http://en.wikipedia.org/wiki/Viola-Jones_object_detection_framework
- http://en.wikipedia.org/wiki/Haar-like_features
The video on this pages shows how Haar features and cascades work to located faces:
- http://dismagazine.com/dystopia/evolved-lifestyles/8115/anti-surveillance-how-to-hide-from-machines/
"""
storage = cv.CreateMemStorage(0)
#lovely. This segfaults if not present
from SimpleCV.Features.HaarCascade import HaarCascade
if isinstance(cascade, basestring):
cascade = HaarCascade(cascade)
if not cascade.getCascade():
return None
elif isinstance(cascade,HaarCascade):
pass
else:
logger.warning('Could not initialize HaarCascade. Enter Valid cascade value.')
# added all of the arguments from the opencv docs arglist
try:
import cv2
haarClassify = cv2.CascadeClassifier(cascade.getFHandle())
objects = haarClassify.detectMultiScale(self.getGrayNumpyCv2(),scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=use_canny)
cv2flag = True
except ImportError:
objects = cv.HaarDetectObjects(self._getEqualizedGrayscaleBitmap(),
cascade.getCascade(), storage, scale_factor, min_neighbors,
use_canny, min_size)
cv2flag = False
if objects is not None:
return FeatureSet([HaarFeature(self, o, cascade,cv2flag) for o in objects])
return None
def drawCircle(self, ctr, rad, color = (0, 0, 0), thickness = 1):
"""
**SUMMARY**
Draw a circle on the image.
**PARAMETERS**
* *ctr* - The center of the circle as an (x,y) tuple.
* *rad* - The radius of the circle in pixels
* *color* - A color tuple (default black)
* *thickness* - The thickness of the circle, -1 means filled in.
**RETURNS**
.. Warning::
This is an inline operation. Nothing is returned, but a circle is drawn on the image's
drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawCircle((img.width/2,img.height/2),rad=50,color=Color.RED,thickness=3)
>>> img.show()
**NOTES**
.. Warning::
Note that this function is deprecated; try to use DrawingLayer.circle() instead.
**SEE ALSO**
:py:meth:`drawLine`
:py:meth:`drawText`
:py:meth:`dl`
:py:meth:`drawRectangle`
:py:class:`DrawingLayer`
"""
if( thickness < 0):
self.getDrawingLayer().circle((int(ctr[0]), int(ctr[1])), int(rad), color, int(thickness),filled=True)
else:
self.getDrawingLayer().circle((int(ctr[0]), int(ctr[1])), int(rad), color, int(thickness))
def drawLine(self, pt1, pt2, color = (0, 0, 0), thickness = 1):
"""
**SUMMARY**
Draw a line on the image.
**PARAMETERS**
* *pt1* - the first point for the line (tuple).
* *pt2* - the second point on the line (tuple).
* *color* - a color tuple (default black).
* *thickness* - the thickness of the line in pixels.
**RETURNS**
.. Warning::
This is an inline operation. Nothing is returned, but a line is drawn on the image's
drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawLine((0,0),(img.width,img.height),color=Color.RED,thickness=3)
>>> img.show()
**NOTES**
.. Warning::
Note that this function is deprecated; try to use DrawingLayer.line() instead.
**SEE ALSO**
:py:meth:`drawText`
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
"""
pt1 = (int(pt1[0]), int(pt1[1]))
pt2 = (int(pt2[0]), int(pt2[1]))
self.getDrawingLayer().line(pt1, pt2, color, thickness)
def size(self):
"""
**SUMMARY**
Returns a tuple that lists the width and height of the image.
**RETURNS**
The width and height as a tuple.
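**EXAMPLE**
Illustrative usage (the printed size depends on the image):
>>> img = Image("lenna")
>>> print img.size()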
"""
if self.width and self.height:
return cv.GetSize(self.getBitmap())
else:
return (0, 0)
def isEmpty(self):
"""
**SUMMARY**
Checks if the image is empty by checking its width and height.
**RETURNS**
True if the image's size is (0, 0), False for any other size.
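**EXAMPLE**
>>> img = Image("lenna")
>>> img.isEmpty()
False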
"""
return self.size() == (0, 0)
def split(self, cols, rows):
"""
**SUMMARY**
This method can be used to break an image into a series of image chunks.
Given a number of cols and rows, it splits the image into a cols x rows 2d array
of cropped images.
**PARAMETERS**
* *rows* - an integer number of rows.
* *cols* - an integer number of cols.
**RETURNS**
A list of SimpleCV images.
**EXAMPLE**
>>> img = Image("lenna")
>>> quadrant =img.split(2,2)
>>> for f in quadrant:
>>> f.show()
>>> time.sleep(1)
**NOTES**
TODO: This should return an ImageList
"""
crops = []
wratio = self.width / cols
hratio = self.height / rows
for i in range(rows):
row = []
for j in range(cols):
row.append(self.crop(j * wratio, i * hratio, wratio, hratio))
crops.append(row)
return crops
def splitChannels(self, grayscale = True):
"""
**SUMMARY**
Split the channels of an image into RGB (not the default BGR).
The single parameter controls whether to return the channels as grey images (default)
or to return them as tinted color images.
**PARAMETERS**
* *grayscale* - If this is true we return three grayscale images, one per channel.
if it is False return tinted images.
**RETURNS**
A tuple of 3 image objects.
**EXAMPLE**
>>> img = Image("lenna")
>>> data = img.splitChannels()
>>> for d in data:
>>> d.show()
>>> time.sleep(1)
**SEE ALSO**
:py:meth:`mergeChannels`
"""
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
red = self.getEmpty()
green = self.getEmpty()
blue = self.getEmpty()
if (grayscale):
cv.Merge(r, r, r, None, red)
cv.Merge(g, g, g, None, green)
cv.Merge(b, b, b, None, blue)
else:
cv.Merge(None, None, r, None, red)
cv.Merge(None, g, None, None, green)
cv.Merge(b, None, None, None, blue)
return (Image(red), Image(green), Image(blue))
def mergeChannels(self,r=None,g=None,b=None):
"""
**SUMMARY**
Merge channels is the opposite of splitChannels. The method takes one image for each
of the R,G,B channels and then recombines them into a single image. Optionally any of these
channels can be None.
**PARAMETERS**
* *r* - The r or last channel of the result SimpleCV Image.
* *g* - The g or center channel of the result SimpleCV Image.
* *b* - The b or first channel of the result SimpleCV Image.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> [r,g,b] = img.splitChannels()
>>> r = r.binarize()
>>> g = g.binarize()
>>> b = b.binarize()
>>> result = img.mergeChannels(r,g,b)
>>> result.show()
**SEE ALSO**
:py:meth:`splitChannels`
"""
if( r is None and g is None and b is None ):
logger.warning("ImageClass.mergeChannels - we need at least one valid channel")
return None
if( r is None ):
r = self.getEmpty(1)
cv.Zero(r);
else:
rt = r.getEmpty(1)
cv.Split(r.getBitmap(),rt,rt,rt,None)
r = rt
if( g is None ):
g = self.getEmpty(1)
cv.Zero(g);
else:
gt = g.getEmpty(1)
cv.Split(g.getBitmap(),gt,gt,gt,None)
g = gt
if( b is None ):
b = self.getEmpty(1)
cv.Zero(b);
else:
bt = b.getEmpty(1)
cv.Split(b.getBitmap(),bt,bt,bt,None)
b = bt
retVal = self.getEmpty()
cv.Merge(b,g,r,None,retVal)
return Image(retVal)
def applyHLSCurve(self, hCurve, lCurve, sCurve):
"""
**SUMMARY**
Apply a color correction curve in HSL space. This method can be used
to change values for each channel. The curves are :py:class:`ColorCurve` class objects.
**PARAMETERS**
* *hCurve* - the hue ColorCurve object.
* *lCurve* - the lightness / value ColorCurve object.
* *sCurve* - the saturation ColorCurve object
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> hc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> lc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> sc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyHLSCurve(hc,lc,sc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyRGBCurve`
"""
#TODO CHECK ROI
#TODO CHECK CURVE SIZE
#TODO CHECK COLORSPACE
#TODO CHECK CURVE SIZE
temp = cv.CreateImage(self.size(), 8, 3)
#Move to HLS space
cv.CvtColor(self._bitmap, temp, cv.CV_RGB2HLS)
tempMat = cv.GetMat(temp) #convert the bitmap to a matrix
#now apply the color curve correction
tempMat = np.asarray(tempMat).copy() #use the HLS-converted data (channel order H, L, S)
tempMat[:, :, 0] = np.take(hCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(lCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(sCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
cv.CvtColor(image, image, cv.CV_HLS2RGB)
return Image(image, colorSpace=self._colorSpace)
def applyRGBCurve(self, rCurve, gCurve, bCurve):
"""
**SUMMARY**
Apply a color correction curve in RGB space. This method can be used
to change values for each channel. The curves are :py:class:`ColorCurve` class objects.
**PARAMETERS**
* *rCurve* - the red ColorCurve object, or appropriately formatted list
* *gCurve* - the green ColorCurve object, or appropriately formatted list
* *bCurve* - the blue ColorCurve object, or appropriately formatted list
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> rc = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> gc = ColorCurve([[0,0], [90, 120], [180, 230], [255, 255]])
>>> bc = ColorCurve([[0,0], [70, 110], [180, 230], [240, 255]])
>>> img2 = img.applyRGBCurve(rc,gc,bc)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
if isinstance(bCurve, list):
bCurve = ColorCurve(bCurve)
if isinstance(gCurve, list):
gCurve = ColorCurve(gCurve)
if isinstance(rCurve, list):
rCurve = ColorCurve(rCurve)
tempMat = np.array(self.getMatrix()).copy()
tempMat[:, :, 0] = np.take(bCurve.mCurve, tempMat[:, :, 0])
tempMat[:, :, 1] = np.take(gCurve.mCurve, tempMat[:, :, 1])
tempMat[:, :, 2] = np.take(rCurve.mCurve, tempMat[:, :, 2])
#Now we jimmy the np array into a cvMat
image = cv.CreateImageHeader((tempMat.shape[1], tempMat.shape[0]), cv.IPL_DEPTH_8U, 3)
cv.SetData(image, tempMat.tostring(), tempMat.dtype.itemsize * 3 * tempMat.shape[1])
return Image(image, colorSpace=self._colorSpace)
def applyIntensityCurve(self, curve):
"""
**SUMMARY**
Apply an intensity (grayscale) curve to all three color channels.
**PARAMETERS**
* *curve* - a ColorCurve object, or 2d list that can be conditioned into one
**RETURNS**
A SimpleCV Image
**EXAMPLE**
>>> img = Image("lenna")
>>> c = ColorCurve([[0,0], [100, 120], [180, 230], [255, 255]])
>>> img2 = img.applyIntensityCurve(c)
**SEE ALSO**
:py:class:`ColorCurve`
:py:meth:`applyHLSCurve`
"""
return self.applyRGBCurve(curve, curve, curve)
def colorDistance(self, color = Color.BLACK):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from a given color
tuple, scaled between 0 (the given color) and 255. Pixels distant from the
given color will appear brighter and pixels closest to the target color
will be darker.
By default this will give image intensity (distance from pure black)
**PARAMETERS**
* *color* - Color object or Color Tuple
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.colorDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`findBlobsFromMask`
"""
pixels = np.array(self.getNumpy()).reshape(-1, 3) #reshape our matrix to 1xN
distances = spsd.cdist(pixels, [color]) #calculate the distance each pixel is
distances *= (255.0/distances.max()) #normalize to 0 - 255
return Image(distances.reshape(self.width, self.height)) #return an Image
def hueDistance(self, color = Color.BLACK, minsaturation = 20, minvalue = 20, maxvalue=255):
"""
**SUMMARY**
Returns an image representing the distance of each pixel from the given hue
of a specific color. The hue is "wrapped" at 180, so we have to take the shorter
of the distances between them -- this gives a hue distance of max 90, which we'll
scale into a 0-255 grayscale image.
The minsaturation and minvalue are optional parameters to weed out very weak hue
signals in the picture, they will be pushed to max distance [255]
**PARAMETERS**
* *color* - Color object or Color Tuple.
* *minsaturation* - the minimum saturation value for color (from 0 to 255).
* *minvalue* - the minimum value (brightness) for the color (from 0 to 255).
**RETURNS**
A simpleCV image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.hueDistance(color=Color.BLACK)
>>> img2.show()
**SEE ALSO**
:py:meth:`binarize`
:py:meth:`hueDistance`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
if isinstance(color, (float,int,long,complex)):
color_hue = color
else:
color_hue = Color.hsv(color)[0]
vsh_matrix = self.toHSV().getNumpy().reshape(-1,3) #again, gets transposed to vsh
hue_channel = np.cast['int'](vsh_matrix[:,2])
if color_hue < 90:
hue_loop = 180
else:
hue_loop = -180
#set whether we need to move back or forward on the hue circle
distances = np.minimum( np.abs(hue_channel - color_hue), np.abs(hue_channel - (color_hue + hue_loop)))
#take the minimum distance for each pixel
distances = np.where(
np.logical_and(vsh_matrix[:,0] > minvalue, vsh_matrix[:,1] > minsaturation),
distances * (255.0 / 90.0), #normalize 0 - 90 -> 0 - 255
255.0) #use the max distance (255) if it falls outside of our value/saturation tolerances
return Image(distances.reshape(self.width, self.height))
def erode(self, iterations=1, kernelsize=3):
"""
**SUMMARY**
Apply a morphological erosion. An erosion has the effect of removing small bits of noise
and smoothing blobs.
This implementation uses the default openCV 3X3 square kernel
Erosion is effectively a local minima detector, the kernel moves over the image and
takes the minimum value inside the kernel.
iterations - this parameter is the number of times to apply/reapply the operation
* See: http://en.wikipedia.org/wiki/Erosion_(morphology).
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-erode
* Example Use: A threshold/blob image has 'salt and pepper' noise.
* Example Code: /examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the erosion operation.
* *kernelsize* - the size of the square structuring element kernel.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.erode(3).show()
**SEE ALSO**
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(kernelsize,kernelsize, 1, 1, cv.CV_SHAPE_RECT)
cv.Erode(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
def dilate(self, iterations=1):
"""
**SUMMARY**
Apply a morphological dilation. A dilation has the effect of smoothing blobs while
intensifying the amount of noise blobs.
This implementation uses the default openCV 3X3 square kernel
Dilation is effectively a local maxima detector, the kernel moves over the image and
takes the maxima value inside the kernel.
* See: http://en.wikipedia.org/wiki/Dilation_(morphology)
* See: http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-dilate
* Example Use: A part's blob needs to be smoother
* Example Code: ./examples/MorphologyExample.py
**PARAMETERS**
* *iterations* - the number of times to run the dilation operation.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.dilate(3).show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
cv.Dilate(self.getBitmap(), retVal, kern, iterations)
return Image(retVal, colorSpace=self._colorSpace)
def morphOpen(self):
"""
**SUMMARY**
morphologyOpen applies a morphological open operation which is effectively
an erosion operation followed by a morphological dilation. This operation
helps to 'break apart' or 'open' binary regions which are close together.
* `Morphological opening on Wikipedia <http://en.wikipedia.org/wiki/Opening_(morphology)>`_
* `OpenCV documentation <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: two part blobs are 'sticking' together.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphOpen().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphClose`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_OPEN, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_OPEN, 1)
#OPENCV 2.2 vs 2.3 compatibility
return( Image(retVal) )
def morphClose(self):
"""
**SUMMARY**
morphologyClose applies a morphological close operation which is effectively
a dilation operation followed by a morphological erosion. This operation
helps to 'bring together' or 'close' binary regions which are close together.
* See: `Closing <http://en.wikipedia.org/wiki/Closing_(morphology)>`_
* See: `Morphology from OpenCV <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: Use when a part, which should be one blob is really two blobs.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphClose().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphGradient`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_CLOSE, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_CLOSE, 1)
#OPENCV 2.2 vs 2.3 compatibility
return Image(retVal, colorSpace=self._colorSpace)
def morphGradient(self):
"""
**SUMMARY**
The morphological gradient is the difference between the morphological
dilation and the morphological erosion. This operation extracts the
edges of the blobs in the image.
* `See Morph Gradient of Wikipedia <http://en.wikipedia.org/wiki/Morphological_Gradient>`_
* `OpenCV documentation <http://opencv.willowgarage.com/documentation/cpp/image_filtering.html#cv-morphologyex>`_
* Example Use: Use when you have blobs but you really just want to know the blob edges.
* Example Code: ./examples/MorphologyExample.py
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> derp = img.binarize()
>>> derp.morphGradient().show()
**SEE ALSO**
:py:meth:`erode`
:py:meth:`dilate`
:py:meth:`binarize`
:py:meth:`morphOpen`
:py:meth:`morphClose`
:py:meth:`findBlobsFromMask`
"""
retVal = self.getEmpty()
temp = self.getEmpty()
kern = cv.CreateStructuringElementEx(3, 3, 1, 1, cv.CV_SHAPE_RECT)
try:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.MORPH_GRADIENT, 1)
except:
cv.MorphologyEx(self.getBitmap(), retVal, temp, kern, cv.CV_MOP_GRADIENT, 1)
return Image(retVal, colorSpace=self._colorSpace )
def histogram(self, numbins = 50):
"""
**SUMMARY**
Return a numpy array of the 1D histogram of intensity for pixels in the image
Single parameter is how many "bins" to have.
**PARAMETERS**
* *numbins* - An integer number of bins in a histogram.
**RETURNS**
A list of histogram bin values.
**EXAMPLE**
>>> img = Image('lenna')
>>> hist = img.histogram()
**SEE ALSO**
:py:meth:`hueHistogram`
"""
gray = self._getGrayscaleBitmap()
(hist, bin_edges) = np.histogram(np.asarray(cv.GetMat(gray)), bins=numbins)
return hist.tolist()
def hueHistogram(self, bins = 179, dynamicRange=True):
"""
**SUMMARY**
Returns the histogram of the hue channel for the image
**PARAMETERS**
* *bins* - An integer number of bins in the histogram (default 179).
* *dynamicRange* - If True, compute the histogram over the range of hues actually present; otherwise use the fixed range (0, 360).
**RETURNS**
A numpy array of histogram bin values.
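**EXAMPLE**
>>> img = Image('lenna')
>>> hist = img.hueHistogram()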
**SEE ALSO**
:py:meth:`histogram`
"""
if dynamicRange:
return np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins)[0]
else:
return np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins, range=(0.0,360.0))[0]
def huePeaks(self, bins = 179):
"""
**SUMMARY**
Takes the histogram of hues, and returns the peak hue values, which
can be useful for determining what the "main colors" in a picture are.
The bins parameter can be used to lump hues together, by default it is 179
(the full resolution in OpenCV's HSV format)
Peak detection code taken from https://gist.github.com/1178136
Converted from/based on a MATLAB script at http://billauer.co.il/peakdet.html
Returns a list of tuples, each tuple contains the hue, and the fraction
of the image that has it.
**PARAMETERS**
* *bins* - the integer number of bins, between 0 and 179.
**RETURNS**
A list of (hue,fraction) tuples.
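**EXAMPLE**
>>> img = Image('lenna')
>>> peaks = img.huePeaks()
>>> print peaks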
"""
# keyword arguments:
# y_axis -- A list containing the signal over which to find peaks
# x_axis -- A x-axis whose values correspond to the 'y_axis' list and is used
# in the return to specify the position of the peaks. If omitted the index
# of the y_axis is used. (default: None)
# lookahead -- (optional) distance to look ahead from a peak candidate to
# determine if it is the actual peak (default: 500)
# '(sample / period) / f' where '4 >= f >= 1.25' might be a good value
# delta -- (optional) this specifies a minimum difference between a peak and
# the following points, before a peak may be considered a peak. Useful
# to hinder the algorithm from picking up false peaks towards the end of
# the signal. To work well delta should be set to 'delta >= RMSnoise * 5'.
# (default: 0)
# Delta function causes a 20% decrease in speed, when omitted
# Correctly used it can double the speed of the algorithm
# return -- Each cell of the lists contains a tuple of:
# (position, peak_value)
# to get the average peak value do 'np.mean(maxtab, 0)[1]' on the results
y_axis, x_axis = np.histogram(self.toHSV().getNumpy()[:,:,2], bins = bins)
x_axis = x_axis[0:bins]
lookahead = int(bins / 17)
delta = 0
maxtab = []
mintab = []
dump = [] #Used to pop the first hit which is always false
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
#perform some checks
if length != len(x_axis):
raise ValueError, "Input vectors y_axis and x_axis must have same length"
if lookahead < 1:
raise ValueError, "Lookahead must be above '1' in value"
if not (np.isscalar(delta) and delta >= 0):
raise ValueError, "delta must be a positive number"
#needs to be a numpy array
y_axis = np.asarray(y_axis)
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
maxtab.append((mxpos, mx))
dump.append(True)
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
####look for min####
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
mintab.append((mnpos, mn))
dump.append(False)
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
#Remove the false hit on the first value of the y_axis
try:
if dump[0]:
maxtab.pop(0)
#print "pop max"
else:
mintab.pop(0)
#print "pop min"
del dump
except IndexError:
#no peaks were found, should the function return empty lists?
pass
huetab = []
for hue, pixelcount in maxtab:
huetab.append((hue, pixelcount / float(self.width * self.height)))
return huetab
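#Pixel access and arithmetic operator overloads. img[x,y] returns an (R,G,B)
#tuple for BGR images (or a cropped Image when given slices), img[x,y] = (r,g,b)
#sets a pixel, and the +, -, *, /, &, |, and ** operators perform the
#corresponding per-pixel OpenCV operations and return new Images.
#Unary - and ~ both invert the image.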
def __getitem__(self, coord):
ret = self.getMatrix()[tuple(reversed(coord))]
if (type(ret) == cv.cvmat):
(width, height) = cv.GetSize(ret)
newmat = cv.CreateMat(height, width, ret.type)
cv.Copy(ret, newmat) #this seems to be a bug in opencv
#if you don't copy the matrix slice, when you convert to bmp you get
#a slice-sized hunk starting at 0, 0
return Image(newmat)
if self.isBGR():
return tuple(reversed(ret))
else:
return tuple(ret)
def __setitem__(self, coord, value):
value = tuple(reversed(value)) #RGB -> BGR
if(isinstance(coord[0],slice)):
cv.Set(self.getMatrix()[tuple(reversed(coord))], value)
self._clearBuffers("_matrix")
else:
self.getMatrix()[tuple(reversed(coord))] = value
self._clearBuffers("_matrix")
def __sub__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.SubS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Sub(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __add__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.AddS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Add(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __and__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.AndS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.And(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __or__(self, other):
newbitmap = self.getEmpty()
if is_number(other):
cv.OrS(self.getBitmap(), cv.Scalar(other,other,other), newbitmap)
else:
cv.Or(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __div__(self, other):
newbitmap = self.getEmpty()
if (not is_number(other)):
cv.Div(self.getBitmap(), other.getBitmap(), newbitmap)
else:
cv.ConvertScale(self.getBitmap(), newbitmap, 1.0/float(other))
return Image(newbitmap, colorSpace=self._colorSpace)
def __mul__(self, other):
newbitmap = self.getEmpty()
if (not is_number(other)):
cv.Mul(self.getBitmap(), other.getBitmap(), newbitmap)
else:
cv.ConvertScale(self.getBitmap(), newbitmap, float(other))
return Image(newbitmap, colorSpace=self._colorSpace)
def __pow__(self, other):
newbitmap = self.getEmpty()
cv.Pow(self.getBitmap(), newbitmap, other)
return Image(newbitmap, colorSpace=self._colorSpace)
def __neg__(self):
newbitmap = self.getEmpty()
cv.Not(self.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def __invert__(self):
return self.invert()
def max(self, other):
"""
**SUMMARY**
The maximum value of this image and the other image, computed per channel.
If other is a number, returns the per-channel maximum of the image and that number.
**PARAMETERS**
* *other* - Image of the same size or a number.
**RETURNS**
A SimpleCV image.
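**EXAMPLE**
Illustrative usage (raising every channel value to at least 128):
>>> img = Image("lenna")
>>> brighter = img.max(128)
>>> brighter.show()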
"""
newbitmap = self.getEmpty()
if is_number(other):
cv.MaxS(self.getBitmap(), other, newbitmap)
else:
if self.size() != other.size():
warnings.warn("Both images should have same sizes. Returning None.")
return None
cv.Max(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def min(self, other):
"""
**SUMMARY**
The minimum value of this image and the other image, computed per channel.
If other is a number, returns the per-channel minimum of the image and that number.
**PARAMETERS**
* *other* - Image of the same size or a number.
**RETURNS**
A SimpleCV image.
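**EXAMPLE**
Illustrative usage (capping every channel value at 128):
>>> img = Image("lenna")
>>> darker = img.min(128)
>>> darker.show()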
"""
newbitmap = self.getEmpty()
if is_number(other):
cv.MinS(self.getBitmap(), other, newbitmap)
else:
if self.size() != other.size():
warnings.warn("Both images should have same sizes. Returning None.")
return None
cv.Min(self.getBitmap(), other.getBitmap(), newbitmap)
return Image(newbitmap, colorSpace=self._colorSpace)
def _clearBuffers(self, clearexcept = "_bitmap"):
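#Reset every cached buffer recorded in self._initialized_buffers back to its
#initial value, except the one named by clearexcept. Used internally after
#the underlying image data changes (e.g. by __setitem__).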
for k, v in self._initialized_buffers.items():
if k == clearexcept:
continue
self.__dict__[k] = v
def findBarcode(self,doZLib=True,zxing_path=""):
"""
**SUMMARY**
This function requires zbar and the zbar python wrapper
to be installed or zxing and the zxing python library.
**ZBAR**
To install please visit:
http://zbar.sourceforge.net/
On Ubuntu Linux 12.04 or greater:
sudo apt-get install python-zbar
**ZXING**
If you have the python-zxing library installed, you can find 2d and 1d
barcodes in your image. These are returned as Barcode feature objects
in a FeatureSet. The single parameter is the zxing_path, along with
setting the doZLib flag to False. You do not need the zxing_path parameter
if the ZXING_LIBRARY environment variable is set.
You can clone python-zxing at:
http://github.com/oostendo/python-zxing
**INSTALLING ZEBRA CROSSING**
* Download the latest version of zebra crossing from: http://code.google.com/p/zxing/
* unpack the zip file where ever you see fit
>>> cd zxing-x.x, where x.x is the version number of zebra crossing
>>> ant -f core/build.xml
>>> ant -f javase/build.xml
This should build the library, but double check the readme
* Get our helper library
>>> git clone git://github.com/oostendo/python-zxing.git
>>> cd python-zxing
>>> python setup.py install
* Our library does not have a setup file. You will need to add
it to your path variables. On OSX/Linux use a text editor to modify your shell file (e.g. .bashrc)
export ZXING_LIBRARY=<FULL PATH OF ZXING LIBRARY - (i.e. step 2)>
for example:
export ZXING_LIBRARY=/my/install/path/zxing-x.x/
On windows you will need to add these same variables to the system variable, e.g.
http://www.computerhope.com/issues/ch000549.htm
* On OSX/Linux source your shell rc file (e.g. source .bashrc). Windows users may need to restart.
* Go grab some barcodes!
.. Warning::
Users on OSX may see the following error:
RuntimeWarning: tmpnam is a potential security risk to your program
We are working to resolve this issue. For normal use this should not be a problem.
**Returns**
A :py:class:`FeatureSet` of :py:class:`Barcode` objects. If no barcodes are detected the method returns None.
**EXAMPLE**
>>> img = cam.getImage()
>>> barcodes = img.findBarcode()
>>> for b in barcodes:
>>> b.draw()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Barcode`
"""
if( doZLib ):
try:
import zbar
except:
logger.warning('The zbar library is not installed, please install to read barcodes')
return None
#configure zbar
scanner = zbar.ImageScanner()
scanner.parse_config('enable')
raw = self.getPIL().convert('L').tostring()
width = self.width
height = self.height
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
barcode = None
# extract results
for symbol in image:
# do something useful with results
barcode = symbol
# clean up
del(image)
else:
if not ZXING_ENABLED:
warnings.warn("Zebra Crossing (ZXing) Library not installed. Please see the release notes.")
return None
if (not self._barcodeReader):
if not zxing_path:
self._barcodeReader = zxing.BarCodeReader()
else:
self._barcodeReader = zxing.BarCodeReader(zxing_path)
tmp_filename = os.tmpnam() + ".png"
self.save(tmp_filename)
barcode = self._barcodeReader.decode(tmp_filename)
os.unlink(tmp_filename)
if barcode:
f = Barcode(self, barcode)
return FeatureSet([f])
else:
return None
#this function contains two functions -- the basic edge detection algorithm
#and then a function to break the lines down given a threshold parameter
def findLines(self, threshold=80, minlinelength=30, maxlinegap=10, cannyth1=50, cannyth2=100, useStandard=False, nLines=-1, maxpixelgap=1):
"""
**SUMMARY**
findLines will find line segments in your image and returns line feature
objects in a FeatureSet. This method uses the Hough (pronounced "HUFF") transform.
See http://en.wikipedia.org/wiki/Hough_transform
**PARAMETERS**
* *threshold* - which determines the minimum "strength" of the line.
* *minlinelength* - how many pixels long the line must be to be returned.
* *maxlinegap* - how much gap is allowed between line segments to consider them the same line .
* *cannyth1* - thresholds used in the edge detection step, refer to :py:meth:`_getEdgeMap` for details.
* *cannyth2* - thresholds used in the edge detection step, refer to :py:meth:`_getEdgeMap` for details.
* *useStandard* - use standard or probabilistic Hough transform.
* *nLines* - maximum number of lines for return.
* *maxpixelgap* - how much distance between pixels is allowed to consider them the same line.
**RETURNS**
Returns a :py:class:`FeatureSet` of :py:class:`Line` objects. If no lines are found the method returns None.
**EXAMPLE**
>>> img = Image("lenna")
>>> lines = img.findLines()
>>> lines.draw()
>>> img.show()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Line`
:py:meth:`edges`
"""
em = self._getEdgeMap(cannyth1, cannyth2)
linesFS = FeatureSet()
if useStandard:
lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_STANDARD, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap)
if nLines == -1:
nLines = len(lines)
# All white points (edges) in Canny edge image
em = Image(em)
x,y = np.where(em.getGrayNumpy() > 128)
# Put points in dictionary for fast checkout if point is white
pts = dict((p, 1) for p in zip(x, y))
w, h = self.width-1, self.height-1
for rho, theta in lines[:nLines]:
ep = []
ls = []
a = math.cos(theta)
b = math.sin(theta)
# Find endpoints of line on the image's edges
if round(b, 4) == 0: # slope of the line is infinity
ep.append( (int(round(abs(rho))), 0) )
ep.append( (int(round(abs(rho))), h) )
elif round(a, 4) == 0: # slope of the line is zero
ep.append( (0, int(round(abs(rho)))) )
ep.append( (w, int(round(abs(rho)))) )
else:
# top edge
x = rho/float(a)
if 0 <= x <= w:
ep.append((int(round(x)), 0))
# bottom edge
x = (rho - h*b)/float(a)
if 0 <= x <= w:
ep.append((int(round(x)), h))
# left edge
y = rho/float(b)
if 0 <= y <= h:
ep.append((0, int(round(y))))
# right edge
y = (rho - w*a)/float(b)
if 0 <= y <= h:
ep.append((w, int(round(y))))
ep = list(set(ep)) # remove duplicates if line crosses the image at corners
ep.sort()
brl = self.bresenham_line(ep[0], ep[1])
# Follow the points on Bresenham's line. Look for white points.
# If the distance between two adjacent white points (dist) is less than or
# equal maxpixelgap then consider them the same line. If dist is bigger
# maxpixelgap then check if length of the line is bigger than minlinelength.
# If so then add line.
dist = float('inf') # distance between two adjacent white points
len_l = float('-inf') # length of the line
for p in brl:
if p in pts:
if dist > maxpixelgap: # found the end of the previous line and the start of the new line
if len_l >= minlinelength:
if ls:
# If the gap between current line and previous
# is less than maxlinegap then merge these lines
l = ls[-1]
gap = round(math.sqrt( (start_p[0]-l[1][0])**2 + (start_p[1]-l[1][1])**2 ))
if gap <= maxlinegap:
ls.pop()
start_p = l[0]
ls.append( (start_p, last_p) )
# First white point of the new line found
dist = 1
len_l = 1
start_p = p # first endpoint of the line
else:
# dist is less than or equal maxpixelgap, so line doesn't end yet
len_l += dist
dist = 1
last_p = p # last white point
else:
dist += 1
for l in ls:
linesFS.append(Line(self, l))
linesFS = linesFS[:nLines]
else:
lines = cv.HoughLines2(em, cv.CreateMemStorage(), cv.CV_HOUGH_PROBABILISTIC, 1.0, cv.CV_PI/180.0, threshold, minlinelength, maxlinegap)
if nLines == -1:
nLines = len(lines)
for l in lines[:nLines]:
linesFS.append(Line(self, l))
return linesFS
def findChessboard(self, dimensions = (8, 5), subpixel = True):
"""
**SUMMARY**
Given an image, finds a chessboard within that image. Returns the Chessboard featureset.
The Chessboard is typically used for calibration because of its evenly spaced corners.
The single parameter is the dimensions of the chessboard; a typical one can be found in SimpleCV/tools/CalibGrid.png
**PARAMETERS**
* *dimensions* - A tuple of the size of the chessboard in width and height in grid objects.
* *subpixel* - Boolean if True use sub-pixel accuracy, otherwise use regular pixel accuracy.
**RETURNS**
A :py:class:`FeatureSet` of :py:class:`Chessboard` objects. If no chessboards are found None is returned.
**EXAMPLE**
>>> img = cam.getImage()
>>> cb = img.findChessboard()
>>> cb.draw()
**SEE ALSO**
:py:class:`FeatureSet`
:py:class:`Chessboard`
"""
corners = cv.FindChessboardCorners(self._getEqualizedGrayscaleBitmap(), dimensions, cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv.CV_CALIB_CB_NORMALIZE_IMAGE )
if(len(corners[1]) == dimensions[0]*dimensions[1]):
if (subpixel):
spCorners = cv.FindCornerSubPix(self.getGrayscaleMatrix(), corners[1], (11, 11), (-1, -1), (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
else:
spCorners = corners[1]
return FeatureSet([ Chessboard(self, dimensions, spCorners) ])
else:
return None
def edges(self, t1=50, t2=100):
"""
**SUMMARY**
Finds an edge map Image using the Canny edge detection method. Edges will be brighter than the surrounding area.
The t1 parameter is roughly the "strength" of the edge required, and the value between t1 and t2 is used for edge linking.
For more information:
* http://opencv.willowgarage.com/documentation/python/imgproc_feature_detection.html
* http://en.wikipedia.org/wiki/Canny_edge_detector
**PARAMETERS**
* *t1* - Int - the lower Canny threshold.
* *t2* - Int - the upper Canny threshold.
**RETURNS**
A SimpleCV image where the edges are white on a black background.
**EXAMPLE**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().edges().show()
**SEE ALSO**
:py:meth:`findLines`
"""
return Image(self._getEdgeMap(t1, t2), colorSpace=self._colorSpace)
def _getEdgeMap(self, t1=50, t2=100):
"""
Return the binary bitmap which shows where edges are in the image. The two
parameters determine how much change in the image determines an edge,
and how edges are linked together. For more information refer to:
http://en.wikipedia.org/wiki/Canny_edge_detector
http://opencv.willowgarage.com/documentation/python/imgproc_feature_detection.html?highlight=canny#Canny
"""
if (self._edgeMap and self._cannyparam[0] == t1 and self._cannyparam[1] == t2):
return self._edgeMap
self._edgeMap = self.getEmpty(1)
cv.Canny(self._getGrayscaleBitmap(), self._edgeMap, t1, t2)
self._cannyparam = (t1, t2)
return self._edgeMap
def rotate(self, angle, fixed=True, point=[-1, -1], scale = 1.0):
"""
**SUMMARY**
This function rotates an image around a specific point by the given angle.
By default, in "fixed" mode, the returned Image has the same dimensions as the original Image and the contents are scaled to fit. In "full" mode the
contents retain their original size and the Image object grows to fit them.
By default the rotation point is the center of the image.
You can also specify a scaling parameter.
.. Note:
When fixed is set to False, selecting a rotation point has no effect, since the image is moved to fit on the new canvas.
**PARAMETERS**
* *angle* - angle in degrees; positive is clockwise, negative is counter clockwise.
* *fixed* - if fixed is true, keep the original image dimensions, otherwise scale the canvas to fit the rotated image.
* *point* - the point about which we want to rotate; if none is defined we use the center.
* *scale* - an optional floating point scale parameter.
**RETURNS**
The rotated SimpleCV image.
**EXAMPLE**
>>> img = Image('logo')
>>> img2 = img.rotate( 73.00, point=(img.width/2,img.height/2))
>>> img3 = img.rotate( 73.00, fixed=False, point=(img.width/2,img.height/2))
>>> img4 = img2.sideBySide(img3)
>>> img4.show()
**SEE ALSO**
:py:meth:`rotate90`
"""
if( point[0] == -1 or point[1] == -1 ):
point[0] = (self.width-1)/2
point[1] = (self.height-1)/2
if (fixed):
retVal = self.getEmpty()
cv.Zero(retVal)
rotMat = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat)
cv.WarpAffine(self.getBitmap(), retVal, rotMat)
return Image(retVal, colorSpace=self._colorSpace)
#otherwise, we're expanding the matrix to fit the image at original size
rotMat = cv.CreateMat(2, 3, cv.CV_32FC1)
# first we create what we think the rotation matrix should be
cv.GetRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale), rotMat)
A = np.array([0, 0, 1])
B = np.array([self.width, 0, 1])
C = np.array([self.width, self.height, 1])
D = np.array([0, self.height, 1])
#So we have defined our image corners ABCD in homogeneous coordinates
#and apply the rotation so we can figure out the new image size
a = np.dot(rotMat, A)
b = np.dot(rotMat, B)
c = np.dot(rotMat, C)
d = np.dot(rotMat, D)
#I am not sure about this but I think the a/b/c/d are transposed
#now we calculate the extents of the rotated components.
minY = min(a[1], b[1], c[1], d[1])
minX = min(a[0], b[0], c[0], d[0])
maxY = max(a[1], b[1], c[1], d[1])
maxX = max(a[0], b[0], c[0], d[0])
#from the extents we calculate the new size
newWidth = np.ceil(maxX-minX)
newHeight = np.ceil(maxY-minY)
#now we calculate a new translation
tX = 0
tY = 0
#calculate the translation that will get us centered in the new image
if( minX < 0 ):
tX = -1.0*minX
elif(maxX > newWidth-1 ):
tX = -1.0*(maxX-newWidth)
if( minY < 0 ):
tY = -1.0*minY
elif(maxY > newHeight-1 ):
tY = -1.0*(maxY-newHeight)
#now we construct an affine map that will perform the rotation and scaling we want with
#the corners all lined up nicely with the output image.
src = ((A[0], A[1]), (B[0], B[1]), (C[0], C[1]))
dst = ((a[0]+tX, a[1]+tY), (b[0]+tX, b[1]+tY), (c[0]+tX, c[1]+tY))
cv.GetAffineTransform(src, dst, rotMat)
#calculate the translation of the corners to center the image
#use these new corner positions as the input to cvGetAffineTransform
retVal = cv.CreateImage((int(newWidth), int(newHeight)), 8, int(3))
cv.Zero(retVal)
cv.WarpAffine(self.getBitmap(), retVal, rotMat)
#cv.AddS(retVal,(0,255,0),retVal)
return Image(retVal, colorSpace=self._colorSpace)
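# A small self-contained sketch (not part of the original API, assumes only numpy)
# of the "full" rotation bookkeeping above: rotate the four corners, then size the
# output canvas to their bounding box.
#
#   import numpy as np
#   def rotated_canvas_size(w, h, angle_deg, scale=1.0):
#       theta = np.radians(angle_deg)
#       c, s = np.cos(theta) * scale, np.sin(theta) * scale
#       rot = np.array([[c, -s], [s, c]])
#       corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=float)
#       moved = corners.dot(rot.T)  # rotated corner positions
#       new_w = int(np.ceil(moved[:, 0].max() - moved[:, 0].min()))
#       new_h = int(np.ceil(moved[:, 1].max() - moved[:, 1].min()))
#       return new_w, new_h
#
#   # rotated_canvas_size(200, 100, 90) is roughly (100, 200); floating point
#   # rounding in ceil() can add a pixel, just as in the code above.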
def transpose(self):
"""
**SUMMARY**
Does a fast 90 degree rotation to the right with a flip.
.. Warning::
Subsequent calls to this function *WILL NOT* keep rotating it to the right!!!
This function just does a matrix transpose so following one transpose by another will
just yield the original image.
**RETURNS**
The rotated SimpleCV Image.
**EXAMPLE**
>>> img = Image("logo")
>>> img2 = img.transpose()
>>> img2.show()
**SEE ALSO**
:py:meth:`rotate`
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
return(Image(retVal, colorSpace=self._colorSpace))
def shear(self, cornerpoints):
"""
**SUMMARY**
Given a set of new corner points in clockwise order, return a sheared image
with the image contents transformed accordingly. The returned image has the same
dimensions.
**PARAMETERS**
* *cornerpoints* - a 2x4 tuple of points. The order is (top_left, top_right, bottom_left, bottom_right)
**RETURNS**
A simpleCV image.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> img.shear(points).show()
**SEE ALSO**
:py:meth:`transformAffine`
:py:meth:`warp`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1))
#set the original points
aWarp = cv.CreateMat(2, 3, cv.CV_32FC1)
#create the empty warp matrix
cv.GetAffineTransform(src, cornerpoints, aWarp)
return self.transformAffine(aWarp)
def transformAffine(self, rotMatrix):
"""
**SUMMARY**
This helper function for shear performs an affine transformation using the supplied matrix.
The matrix can be either an OpenCV mat or an np.ndarray type.
The matrix should be 2x3.
**PARAMETERS**
* *rotMatrix* - A 2x3 numpy array or CvMat of the affine transform.
**RETURNS**
The rotated image. Note that the rotation is done in place, i.e. the image is not enlarged to fit the transformation.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> src = ((0, 0), (img.width-1, 0), (img.width-1, img.height-1))
>>> result = cv.CreateMat(2,3,cv.CV_32FC1)
>>> cv.GetAffineTransform(src,points,result)
>>> img.transformAffine(result).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth:`warp`
:py:meth:`transformPerspective`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
retVal = self.getEmpty()
if(type(rotMatrix) == np.ndarray ):
rotMatrix = npArray2cvMat(rotMatrix)
cv.WarpAffine(self.getBitmap(), retVal, rotMatrix)
return Image(retVal, colorSpace=self._colorSpace)
def warp(self, cornerpoints):
"""
**SUMMARY**
This method performs an arbitrary perspective transform.
Given a new set of corner points in clockwise order from the top left, return an Image with
the image's contents warped to the new coordinates. The returned image
will be the same size as the original image.
**PARAMETERS**
* *cornerpoints* - A list of four tuples corresponding to the destination corners in the order of (top_left,top_right,bottom_left,bottom_right)
**RETURNS**
A simpleCV Image with the warp applied. Note that this operation does not enlarge the image.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((30, 30), (img.width-10, 70), (img.width-1-40, img.height-1+30),(20,img.height+10))
>>> img.warp(points).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth:`transformAffine`
:py:meth:`transformPerspective`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
#original coordinates
src = ((0, 0), (self.width-1, 0), (self.width-1, self.height-1), (0, self.height-1))
pWarp = cv.CreateMat(3, 3, cv.CV_32FC1) #create an empty 3x3 matrix
cv.GetPerspectiveTransform(src, cornerpoints, pWarp) #figure out the warp matrix
return self.transformPerspective(pWarp)
def transformPerspective(self, rotMatrix):
"""
**SUMMARY**
This helper function for warp performs a perspective transformation using the supplied matrix.
The matrix can be either an OpenCV mat or an np.ndarray type.
The matrix should be 3x3.
**PARAMETERS**
* *rotMatrix* - Numpy Array or CvMat
**RETURNS**
The rotated image. Note that the rotation is done in place, i.e. the image is not enlarged to fit the transformation.
**EXAMPLE**
>>> img = Image("lenna")
>>> points = ((50,0),(img.width+50,0),(img.width,img.height),(0,img.height))
>>> src = ((30, 30), (img.width-10, 70), (img.width-1-40, img.height-1+30),(20,img.height+10))
>>> result = cv.CreateMat(3,3,cv.CV_32FC1)
>>> cv.GetPerspectiveTransform(src,points,result)
>>> img.transformPerspective(result).show()
**SEE ALSO**
:py:meth:`shear`
:py:meth:`warp`
:py:meth:`transformAffine`
:py:meth:`rotate`
http://en.wikipedia.org/wiki/Transformation_matrix
"""
try:
import cv2
if( type(rotMatrix) != np.ndarray ):
rotMatrix = np.array(rotMatrix)
retVal = cv2.warpPerspective(src=np.array(self.getMatrix()), dsize=(self.width,self.height),M=rotMatrix,flags = cv2.INTER_CUBIC)
return Image(retVal, colorSpace=self._colorSpace, cv2image=True)
except:
retVal = self.getEmpty()
if(type(rotMatrix) == np.ndarray ):
rotMatrix = npArray2cvMat(rotMatrix)
cv.WarpPerspective(self.getBitmap(), retVal, rotMatrix)
return Image(retVal, colorSpace=self._colorSpace)
def getPixel(self, x, y):
"""
**SUMMARY**
This function returns the RGB value for a particular image pixel given a specific row and column.
.. Warning::
This function will always return pixels in RGB format even if the image is stored in BGR format.
**PARAMETERS**
* *x* - Int the x pixel coordinate.
* *y* - Int the y pixel coordinate.
**RETURNS**
A color value that is a three element integer tuple.
**EXAMPLE**
>>> img = Image("logo")
>>> color = img.getPixel(10,10)
.. Warning::
We suggest that this method be used sparingly. For repeated pixel access use python array notation. I.e. img[x][y].
"""
c = None
retVal = None
if( x < 0 or x >= self.width ):
logger.warning("getRGBPixel: X value is not valid.")
elif( y < 0 or y >= self.height ):
logger.warning("getRGBPixel: Y value is not valid.")
else:
c = cv.Get2D(self.getBitmap(), y, x)
if( self._colorSpace == ColorSpace.BGR ):
retVal = (c[2],c[1],c[0])
else:
retVal = (c[0],c[1],c[2])
return retVal
def getGrayPixel(self, x, y):
"""
**SUMMARY**
This function returns the gray value for a particular image pixel given a specific row and column.
.. Warning::
The gray value is computed from the image's grayscale representation, regardless of the image's color space.
**PARAMETERS**
* *x* - Int the x pixel coordinate.
* *y* - Int the y pixel coordinate.
**RETURNS**
A gray value integer between 0 and 255.
**EXAMPLE**
>>> img = Image("logo")
>>> color = img.getGrayPixel(10,10)
.. Warning::
We suggest that this method be used sparingly. For repeated pixel access use python array notation. I.e. img[x][y].
"""
retVal = None
if( x < 0 or x >= self.width ):
logger.warning("getGrayPixel: X value is not valid.")
elif( y < 0 or y >= self.height ):
logger.warning("getGrayPixel: Y value is not valid.")
else:
retVal = cv.Get2D(self._getGrayscaleBitmap(), y, x)
retVal = retVal[0]
return retVal
def getVertScanline(self, column):
"""
**SUMMARY**
This function returns a single column of RGB values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *column* - the column number working from left=0 to right=img.width.
**RETURNS**
A numpy array of the pixel values. Usually this is in BGR format.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [0,0,0]
>>> sl = img.getVertScanline(423)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( column < 0 or column >= self.width ):
logger.warning("getVertRGBScanline: column value is not valid.")
else:
retVal = cv.GetCol(self.getBitmap(), column)
retVal = np.array(retVal)
retVal = retVal[:, 0, :]
return retVal
def getHorzScanline(self, row):
"""
**SUMMARY**
This function returns a single row of RGB values from the image.
This is handy if you want to crawl the image looking for an edge.
**PARAMETERS**
* *row* - the row number working from top=0 to bottom=img.height.
**RETURNS**
A numpy array of the pixel values. Usually this is in BGR format.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [0,0,0]
>>> sl = img.getHorzScanline(422)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( row < 0 or row >= self.height ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetRow(self.getBitmap(), row)
retVal = np.array(retVal)
retVal = retVal[0, :, :]
return retVal
def getVertScanlineGray(self, column):
"""
**SUMMARY**
This function returns a single column of gray values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *column* - the column number working from left=0 to right=img.width.
**RETURNS**
A numpy array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [255]
>>> sl = img.getVertScanlineGray(421)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanline`
"""
retVal = None
if( column < 0 or column >= self.width ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetCol(self._getGrayscaleBitmap(), column )
retVal = np.array(retVal)
#retVal = retVal.transpose()
return retVal
def getHorzScanlineGray(self, row):
"""
**SUMMARY**
This function returns a single row of gray values from the image as a numpy array. This is handy if you
want to crawl the image looking for an edge.
**PARAMETERS**
* *row* - the row number working from top=0 to bottom=img.height.
**RETURNS**
A numpy array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> myColor = [255]
>>> sl = img.getHorzScanlineGray(420)
>>> sll = sl.tolist()
>>> for p in sll:
>>> if( p == myColor ):
>>> # do something
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getHorzScanline`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
retVal = None
if( row < 0 or row >= self.height ):
logger.warning("getHorzRGBScanline: row value is not valid.")
else:
retVal = cv.GetRow(self._getGrayscaleBitmap(), row )
retVal = np.array(retVal)
retVal = retVal.transpose()
return retVal
def crop(self, x , y = None, w = None, h = None, centered=False, smart=False):
"""
**SUMMARY**
Consider that you want to crop an image with the following dimensions::
(x,y)
+--------------+
| |
| |h
| |
+--------------+
w (x1,y1)
Crop attempts to use the x and y position variables and the w and h width
and height variables to crop the image. When centered is false, x and y
define the top and left of the cropped rectangle. When centered is true
the function uses x and y as the centroid of the cropped region.
You can also pass a feature into crop and have it automatically return
the cropped image within the bounding box of that feature
Or parameters can be in the form of a
- tuple or list : (x,y,w,h) or [x,y,w,h]
- two points : (x,y),(x1,y1) or [(x,y),(x1,y1)]
**PARAMETERS**
* *x* - An integer or feature.
- If it is a feature we crop to the features dimensions.
- This can be either the top left corner or the center coordinate of the crop region.
- or in the form of tuple/list. i,e (x,y,w,h) or [x,y,w,h]
- Otherwise in two point form. i,e [(x,y),(x1,y1)] or (x,y)
* *y* - The y coordinate of the center, or top left corner of the crop region.
- Otherwise in two point form. i,e (x1,y1)
* *w* - Int - the width of the cropped region in pixels.
* *h* - Int - the height of the cropped region in pixels.
* *centered* - Boolean - if True we treat the crop region as being the center
coordinate and a width and height. If false we treat it as the top left corner of the crop region.
* *smart* - Will make sure you don't try and crop outside the image size, so if your image is 100x100 and you tried a crop like img.crop(50,50,100,100), it will autoscale the crop to the max width.
**RETURNS**
A SimpleCV Image cropped to the specified width and height.
**EXAMPLE**
>>> img = Image('lenna')
>>> img.crop(50,40,128,128).show()
>>> img.crop((50,40,128,128)).show() #roi
>>> img.crop([50,40,128,128]) #roi
>>> img.crop((50,40),(178,168)) # two point form
>>> img.crop([(50,40),(178,168)]) # two point form
>>> img.crop([x1,x2,x3,x4,x5],[y1,y1,y3,y4,y5]) # list of x's and y's
>>> img.crop([(x,y),(x,y),(x,y),(x,y),(x,y)]) # list of (x,y)
>>> img.crop(x,y,100,100, smart=True)
**SEE ALSO**
:py:meth:`embiggen`
:py:meth:`regionSelect`
"""
if smart:
if x > self.width:
x = self.width
elif x < 0:
x = 0
if y > self.height:
y = self.height
elif y < 0:
y = 0
if (x + w) > self.width:
w = self.width - x
if (y + h) > self.height:
h = self.height - y
if(isinstance(x,np.ndarray)):
x = x.tolist()
if(isinstance(y,np.ndarray)):
y = y.tolist()
#If it's a feature extract what we need
if(isinstance(x, Feature)):
theFeature = x
x = theFeature.points[0][0]
y = theFeature.points[0][1]
w = theFeature.width()
h = theFeature.height()
elif(isinstance(x, (tuple,list)) and len(x) == 4 and isinstance(x[0],(int, long, float))
and y == None and w == None and h == None):
x,y,w,h = x
# x of the form [(x,y),(x1,y1),(x2,y2),(x3,y3)]
# x of the form [[x,y],[x1,y1],[x2,y2],[x3,y3]]
# x of the form ([x,y],[x1,y1],[x2,y2],[x3,y3])
# x of the form ((x,y),(x1,y1),(x2,y2),(x3,y3))
# x of the form (x,y,x1,y2) or [x,y,x1,y2]
elif( isinstance(x, (list,tuple)) and
isinstance(x[0],(list,tuple)) and
(len(x) == 4 and len(x[0]) == 2 ) and
y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2 and len(x[2])==2 and len(x[3])==2):
xmax = np.max([x[0][0],x[1][0],x[2][0],x[3][0]])
ymax = np.max([x[0][1],x[1][1],x[2][1],x[3][1]])
xmin = np.min([x[0][0],x[1][0],x[2][0],x[3][0]])
ymin = np.min([x[0][1],x[1][1],x[2][1],x[3][1]])
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form ((x,y),(x1,y1),(x2,y2),(x3,y3))")
return None
# x,y of the form [x1,x2,x3,x4,x5....] and y similar
elif(isinstance(x, (tuple,list)) and
isinstance(y, (tuple,list)) and
len(x) > 4 and len(y) > 4 ):
if(isinstance(x[0],(int, long, float)) and isinstance(y[0],(int, long, float))):
xmax = np.max(x)
ymax = np.max(y)
xmin = np.min(x)
ymin = np.min(y)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form x = [1,2,3,4,5] y =[0,2,4,6,8]")
return None
# x of the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]
elif(isinstance(x, (list,tuple)) and
len(x) > 4 and len(x[0]) == 2 and y == None and w == None and h == None):
if(isinstance(x[0][0],(int, long, float))):
xs = [pt[0] for pt in x]
ys = [pt[1] for pt in x]
xmax = np.max(xs)
ymax = np.max(ys)
xmin = np.min(xs)
ymin = np.min(ys)
x = xmin
y = ymin
w = xmax-xmin
h = ymax-ymin
else:
logger.warning("x should be in the form [(x,y),(x,y),(x,y),(x,y),(x,y),(x,y)]")
return None
# x of the form [(x,y),(x1,y1)]
elif(isinstance(x,(list,tuple)) and len(x) == 2 and isinstance(x[0],(list,tuple)) and isinstance(x[1],(list,tuple)) and y == None and w == None and h == None):
if (len(x[0])==2 and len(x[1])==2):
xt = np.min([x[0][0],x[1][0]])
yt = np.min([x[0][1],x[1][1]])
w = np.abs(x[0][0]-x[1][0])
h = np.abs(x[0][1]-x[1][1])
x = xt
y = yt
else:
logger.warning("x should be in the form [(x1,y1),(x2,y2)]")
return None
# x and y of the form (x,y),(x1,y2)
elif(isinstance(x, (tuple,list)) and isinstance(y,(tuple,list)) and w == None and h == None):
if (len(x)==2 and len(y)==2):
xt = np.min([x[0],y[0]])
yt = np.min([x[1],y[1]])
w = np.abs(y[0]-x[0])
h = np.abs(y[1]-x[1])
x = xt
y = yt
else:
logger.warning("if x and y are tuple it should be in the form (x1,y1) and (x2,y2)")
return None
if(y == None or w == None or h == None):
print "Please provide an x, y, width, height to function"
return None
if( w <= 0 or h <= 0 ):
logger.warning("Can't do a negative crop!")
return None
retVal = cv.CreateImage((int(w),int(h)), cv.IPL_DEPTH_8U, 3)
if( x < 0 or y < 0 ):
logger.warning("Crop will try to help you, but you have a negative crop position, your width and height may not be what you want them to be.")
if( centered ):
rectangle = (int(x-(w/2)), int(y-(h/2)), int(w), int(h))
else:
rectangle = (int(x), int(y), int(w), int(h))
(topROI, bottomROI) = self._rectOverlapROIs((rectangle[2],rectangle[3]),(self.width,self.height),(rectangle[0],rectangle[1]))
if( bottomROI is None ):
logger.warning("Hi, your crop rectangle doesn't even overlap your image. I have no choice but to return None.")
return None
retVal = np.zeros((bottomROI[3],bottomROI[2],3),dtype='uint8')
retVal= self.getNumpyCv2()[bottomROI[1]:bottomROI[1] + bottomROI[3],bottomROI[0]:bottomROI[0] + bottomROI[2],:]
img = Image(retVal, colorSpace=self._colorSpace,cv2image = True)
#Buffering the top left point (x, y) in an image.
img._uncroppedX = self._uncroppedX + int(x)
img._uncroppedY = self._uncroppedY + int(y)
return img
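# A hedged sketch (hypothetical helper, not part of the original class) of how the
# two-point form above collapses to a single (x, y, w, h) rectangle:
#
#   def two_point_to_rect(p1, p2):
#       x, y = min(p1[0], p2[0]), min(p1[1], p2[1])
#       return x, y, abs(p2[0] - p1[0]), abs(p2[1] - p1[1])
#
#   # two_point_to_rect((50, 40), (178, 168)) -> (50, 40, 128, 128)
#   # With centered=True the (x, y) you pass is treated as the centre, i.e. the
#   # rectangle actually cropped is (x - w/2, y - h/2, w, h).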
def regionSelect(self, x1, y1, x2, y2 ):
"""
**SUMMARY**
Region select is similar to crop, but instead of taking a position and width
and height values it simply takes two points on the image and returns the selected
region. This is very helpful for creating interactive scripts that require
the user to select a region.
**PARAMETERS**
* *x1* - Int - Point one x coordinate.
* *y1* - Int - Point one y coordinate.
* *x2* - Int - Point two x coordinate.
* *y2* - Int - Point two y coordinate.
**RETURNS**
A cropped SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> subreg = img.regionSelect(10,10,100,100) # often this comes from a mouse click
>>> subreg.show()
**SEE ALSO**
:py:meth:`crop`
"""
w = abs(x1-x2)
h = abs(y1-y2)
retVal = None
if( w <= 0 or h <= 0 or w > self.width or h > self.height ):
logger.warning("regionSelect: the given values will not fit in the image or are too small.")
else:
xf = x2
if( x1 < x2 ):
xf = x1
yf = y2
if( y1 < y2 ):
yf = y1
retVal = self.crop(xf, yf, w, h)
return retVal
def clear(self):
"""
**SUMMARY**
This is a slightly unsafe method that clears out the entire image state.
It is usually used in conjunction with blob drawing, to fill in and draw
a single large blob on the image.
.. Warning:
Do not use this method unless you have a particularly compelling reason.
"""
cv.SetZero(self._bitmap)
self._clearBuffers()
def draw(self, features, color=Color.GREEN, width=1, autocolor=False):
"""
**SUMMARY**
This is a method to draw Features on any given image.
**PARAMETERS**
* *features* - FeatureSet or any Feature (eg. Line, Circle, Corner, etc)
* *color* - Color of the Feature to be drawn
* *width* - width of the Feature to be drawn
* *autocolor*- If true a color is randomly selected for each feature
**RETURNS**
None
**EXAMPLE**
img = Image("lenna")
lines = img.equalize().findLines()
img.draw(lines)
img.show()
"""
if type(features) == type(self):
warnings.warn("You need to pass drawable features.")
return None
if hasattr(features, 'draw'):
from copy import deepcopy
if isinstance(features, FeatureSet):
cfeatures = deepcopy(features)
for cfeat in cfeatures:
cfeat.image = self
cfeatures.draw(color, width, autocolor)
else:
cfeatures = deepcopy(features)
cfeatures.image = self
cfeatures.draw(color, width)
else:
warnings.warn("You need to pass drawable features.")
return None
def drawText(self, text = "", x = None, y = None, color = Color.BLUE, fontsize = 16):
"""
**SUMMARY**
This function draws the string that is passed on the screen at the specified coordinates.
The Default Color is blue but you can pass it various colors
The text will default to the center of the screen if you don't pass it a value
**PARAMETERS**
* *text* - String - the text you want to write. ASCII only please.
* *x* - Int - the x position in pixels.
* *y* - Int - the y position in pixels.
* *color* - Color object or Color Tuple
* *fontsize* - Int - the font size - roughly in points.
**RETURNS**
Nothing. This is an in place function. Text is added to the Images drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawText("xamox smells like cool ranch doritos.", 50,50,color=Color.BLACK,fontsize=48)
>>> img.show()
**SEE ALSO**
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
"""
if(x == None):
x = (self.width / 2)
if(y == None):
y = (self.height / 2)
self.getDrawingLayer().setFontSize(fontsize)
self.getDrawingLayer().text(text, (x, y), color)
def drawRectangle(self,x,y,w,h,color=Color.RED,width=1,alpha=255):
"""
**SUMMARY**
Draw a rectangle on the screen given the upper left corner of the rectangle
and the width and height.
**PARAMETERS**
* *x* - the x position.
* *y* - the y position.
* *w* - the width of the rectangle.
* *h* - the height of the rectangle.
* *color* - an RGB tuple indicating the desired color.
* *width* - the line width of the rectangle's border; a value less than or equal to zero means the rectangle is filled in completely.
* *alpha* - the alpha value on the interval from 255 to 0, 255 is opaque, 0 is completely transparent.
**RETURNS**
None - this operation is in place and adds the rectangle to the drawing layer.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawRectangle(50,50,100,123)
>>> img.show()
**SEE ALSO**
:py:meth:`dl`
:py:meth:`drawCircle`
:py:meth:`drawRectangle`
:py:meth:`applyLayers`
:py:class:`DrawingLayer`
"""
if( width < 1 ):
self.getDrawingLayer().rectangle((x,y),(w,h),color,filled=True,alpha=alpha)
else:
self.getDrawingLayer().rectangle((x,y),(w,h),color,width,alpha=alpha)
def drawRotatedRectangle(self,boundingbox,color=Color.RED,width=1):
"""
**SUMMARY**
Draw the minimum bounding rectangle. This rectangle is a series of four points.
**TODO**
**KAT FIX THIS**
"""
cv.EllipseBox(self.getBitmap(),box=boundingbox,color=color,thickness=width)
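# The method above is flagged TODO and currently draws an ellipse box rather than
# a true rotated rectangle. One possible fix, sketched here under the assumption
# that cv.BoxPoints and the drawing layer's line() behave as they do elsewhere in
# this module, is to connect the box's four corners on the drawing layer:
#
#   def drawRotatedRectangleSketch(self, boundingbox, color=Color.RED, width=1):
#       corners = cv.BoxPoints(boundingbox)  # four (x, y) corner tuples
#       for i in range(4):
#           start = corners[i]
#           stop = corners[(i + 1) % 4]  # wrap around to close the box
#           self.dl().line(start, stop, color, width)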
def show(self, type = 'window'):
"""
**SUMMARY**
This function automatically pops up a window and shows the current image.
**PARAMETERS**
* *type* - this string can have one of two values, either 'window', or 'browser'. Window opens
a display window, while browser opens the default web browser to show an image.
**RETURNS**
This method returns the display object. In the case of the browser this is a JpegStreamer
object. In the case of a window a Display object is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.show()
>>> img.show('browser')
**SEE ALSO**
:py:class:`JpegStreamer`
:py:class:`Display`
"""
if(type == 'browser'):
import webbrowser
js = JpegStreamer(8080)
self.save(js)
webbrowser.open("http://localhost:8080", 2)
return js
elif (type == 'window'):
from SimpleCV.Display import Display
if init_options_handler.on_notebook:
d = Display(displaytype='notebook')
else:
d = Display(self.size())
self.save(d)
return d
else:
print "Unknown type to show"
def _surface2Image(self,surface):
imgarray = pg.surfarray.array3d(surface)
retVal = Image(imgarray)
retVal._colorSpace = ColorSpace.RGB
return retVal.toBGR().transpose()
def _image2Surface(self,img):
return pg.image.fromstring(img.getPIL().tostring(),img.size(), "RGB")
#return pg.surfarray.make_surface(img.toRGB().getNumpy())
def toPygameSurface(self):
"""
**SUMMARY**
Converts this image to a pygame surface. This is useful if you want
to treat an image as a sprite to render onto an image. An example
would be rendering blobs on to an image.
.. Warning::
*THIS IS EXPERIMENTAL*. We are planning to remove this functionality sometime in the near future.
**RETURNS**
The image as a pygame surface.
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`insertDrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
return pg.image.fromstring(self.getPIL().tostring(),self.size(), "RGB")
def addDrawingLayer(self, layer = None):
"""
**SUMMARY**
Push a new drawing layer onto the back of the layer stack
**PARAMETERS**
* *layer* - The new drawing layer to add.
**RETURNS**
The index of the new layer as an integer.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer = DrawingLayer((img.width,img.height))
>>> img.addDrawingLayer(myLayer)
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`insertDrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if layer is None:
layer = DrawingLayer(self.size())
elif not isinstance(layer, DrawingLayer):
return "Please pass a DrawingLayer object"
self._mLayers.append(layer)
return len(self._mLayers)-1
def insertDrawingLayer(self, layer, index):
"""
**SUMMARY**
Insert a new layer into the layer stack at the specified index.
**PARAMETERS**
* *layer* - A drawing layer with the content you want to draw.
* *index* - The index at which to insert the layer.
**RETURNS**
None - that's right - nothing.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw on the layers
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
self._mLayers.insert(index, layer)
return None
def removeDrawingLayer(self, index = -1):
"""
**SUMMARY**
Remove a layer from the layer stack based on the layer's index.
**PARAMETERS**
* *index* - Int - the index of the layer to remove.
**RETURNS**
This method returns the removed drawing layer.
**EXAMPLES**
>>> img = Image("Lenna")
>>> img.removeDrawingLayer(1) # removes the layer with index = 1
>>> img.removeDrawingLayer() # if no index is specified it removes the top layer
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
try:
return self._mLayers.pop(index)
except IndexError:
print 'Not a valid index or No layers to remove!'
def getDrawingLayer(self, index = -1):
"""
**SUMMARY**
Return a drawing layer based on the provided index. If not provided, will
default to the top layer. If no layers exist, one will be created
**PARAMETERS**
* *index* - returns the drawing layer at the specified index.
**RETURNS**
A drawing layer.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw on the layers
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> layer2 =img.getDrawingLayer(2)
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`clearLayers`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if not len(self._mLayers):
layer = DrawingLayer(self.size())
self.addDrawingLayer(layer)
try:
return self._mLayers[index]
except IndexError:
print 'Not a valid index'
def dl(self, index = -1):
"""
**SUMMARY**
Alias for :py:meth:`getDrawingLayer`
"""
return self.getDrawingLayer(index)
def clearLayers(self):
"""
**SUMMARY**
Remove all of the drawing layers.
**RETURNS**
None.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> img.clearLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
# removing items while iterating over the list skips elements, so just reset it
self._mLayers = []
return None
def layers(self):
"""
**SUMMARY**
Return the array of DrawingLayer objects associated with the image.
**RETURNS**
A list of drawing layers.
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`mergedLayers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
return self._mLayers
#render the image.
def _renderImage(self, layer):
imgSurf = self.getPGSurface().copy()
imgSurf.blit(layer._mSurface, (0, 0))
return Image(imgSurf)
def mergedLayers(self):
"""
**SUMMARY**
Return all DrawingLayer objects as a single DrawingLayer.
**RETURNS**
Returns a drawing layer with all of the drawing layers of this image merged into one.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> derp = img.mergedLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`addDrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`applyLayers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
final = DrawingLayer(self.size())
for layers in self._mLayers: #compose all the layers
layers.renderToOtherLayer(final)
return final
def applyLayers(self, indicies=-1):
"""
**SUMMARY**
Render all of the layers onto the current image and return the result.
Indicies can be a list of integers specifying the layers to be used.
**PARAMETERS**
* *indicies* - Indicies can be a list of integers specifying the layers to be used.
**RETURNS**
The image after applying the drawing layers.
**EXAMPLE**
>>> img = Image("Lenna")
>>> myLayer1 = DrawingLayer((img.width,img.height))
>>> myLayer2 = DrawingLayer((img.width,img.height))
>>> #Draw some stuff
>>> img.insertDrawingLayer(myLayer1,1) # on top
>>> img.insertDrawingLayer(myLayer2,2) # on the bottom
>>> derp = img.applyLayers()
**SEE ALSO**
:py:class:`DrawingLayer`
:py:meth:`dl`
:py:meth:`toPygameSurface`
:py:meth:`getDrawingLayer`
:py:meth:`removeDrawingLayer`
:py:meth:`layers`
:py:meth:`drawText`
:py:meth:`drawRectangle`
:py:meth:`drawCircle`
:py:meth:`blit`
"""
if not len(self._mLayers):
return self
if(indicies==-1 and len(self._mLayers) > 0 ):
final = self.mergedLayers()
imgSurf = self.getPGSurface().copy()
imgSurf.blit(final._mSurface, (0, 0))
return Image(imgSurf)
else:
final = DrawingLayer((self.width, self.height))
retVal = self
indicies.reverse()
for idx in indicies:
retVal = self._mLayers[idx].renderToOtherLayer(final)
imgSurf = self.getPGSurface().copy()
imgSurf.blit(final._mSurface, (0, 0))
indicies.reverse()
return Image(imgSurf)
def adaptiveScale(self, resolution,fit=True):
"""
**SUMMARY**
Adaptive Scale is used by the Display to automatically
adjust image size to match the display size. This method attempts to scale
an image to the desired resolution while keeping the aspect ratio the same.
If fit is False we simply crop and center the image to the resolution.
In general this method should look a lot better than arbitrary cropping and scaling.
**PARAMETERS**
* *resolution* - The size of the returned image as a (width,height) tuple.
* *fit* - If fit is true we try to fit the image while maintaining the aspect ratio.
If fit is False we crop and center the image to fit the resolution.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
This is typically used in this instance:
>>> d = Display((800,600))
>>> i = Image((640, 480))
>>> i.save(d)
Where this would scale the image to match the display size of 800x600
"""
wndwAR = float(resolution[0])/float(resolution[1])
imgAR = float(self.width)/float(self.height)
img = self
targetx = 0
targety = 0
targetw = resolution[0]
targeth = resolution[1]
if( self.size() == resolution): # the image already matches the target resolution
return self
elif( imgAR == wndwAR and fit):
retVal = img.scale(resolution[0],resolution[1])
return retVal
elif(fit):
#scale factors
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
wscale = (float(self.width)/float(resolution[0]))
hscale = (float(self.height)/float(resolution[1]))
if(wscale>1): #we're shrinking; compute the percent reduction
wscale=1-(1.0/wscale)
else: # we need to grow the image by a percentage
wscale = 1.0-wscale
if(hscale>1):
hscale=1-(1.0/hscale)
else:
hscale=1.0-hscale
if( wscale == 0 ): #if we can get away with not scaling do that
targetx = 0
targety = (resolution[1]-self.height)/2
targetw = img.width
targeth = img.height
elif( hscale == 0 ): #if we can get away with not scaling do that
targetx = (resolution[0]-img.width)/2
targety = 0
targetw = img.width
targeth = img.height
elif(wscale < hscale): # the width has less distortion
sfactor = float(resolution[0])/float(self.width)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
if( targetw > resolution[0] or targeth > resolution[1]):
#aw shucks that still didn't work do the other way instead
sfactor = float(resolution[1])/float(self.height)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
targetx = (resolution[0]-targetw)/2
targety = 0
else:
targetx = 0
targety = (resolution[1]-targeth)/2
img = img.scale(targetw,targeth)
else: #the height has more distortion
sfactor = float(resolution[1])/float(self.height)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
if( targetw > resolution[0] or targeth > resolution[1]):
#aw shucks that still didn't work do the other way instead
sfactor = float(resolution[0])/float(self.width)
targetw = int(float(self.width)*sfactor)
targeth = int(float(self.height)*sfactor)
targetx = 0
targety = (resolution[1]-targeth)/2
else:
targetx = (resolution[0]-targetw)/2
targety = 0
img = img.scale(targetw,targeth)
else: # we're going to crop instead
if(self.width <= resolution[0] and self.height <= resolution[1] ): # center a too small image
#we're too small just center the thing
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetx = (resolution[0]/2)-(self.width/2)
targety = (resolution[1]/2)-(self.height/2)
targeth = self.height
targetw = self.width
elif(self.width > resolution[0] and self.height > resolution[1]): #crop too big on both axes
targetw = resolution[0]
targeth = resolution[1]
targetx = 0
targety = 0
x = (self.width-resolution[0])/2
y = (self.height-resolution[1])/2
img = img.crop(x,y,targetw,targeth)
return img
elif( self.width <= resolution[0] and self.height > resolution[1]): #height too big
#crop along the y dimension and center along the x dimension
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetw = self.width
targeth = resolution[1]
targetx = (resolution[0]-self.width)/2
targety = 0
x = 0
y = (self.height-resolution[1])/2
img = img.crop(x,y,targetw,targeth)
elif( self.width > resolution[0] and self.height <= resolution[1]): #width too big
#crop along the x dimension and center along the y dimension
retVal = np.zeros((resolution[1],resolution[0],3),dtype='uint8')
targetw = resolution[0]
targeth = self.height
targetx = 0
targety = (resolution[1]-self.height)/2
x = (self.width-resolution[0])/2
y = 0
img = img.crop(x,y,targetw,targeth)
retVal[targety:targety + targeth,targetx:targetx + targetw,:] = img.getNumpyCv2()
retVal = Image(retVal,cv2image = True)
return(retVal)
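# A minimal sketch (not part of the original class) of the aspect-ratio arithmetic
# adaptiveScale performs when fit=True: scale by the axis that distorts least, then
# centre the scaled image inside the target resolution.
#
#   def letterbox_target(src_w, src_h, dst_w, dst_h):
#       sfactor = min(float(dst_w) / src_w, float(dst_h) / src_h)
#       tw, th = int(src_w * sfactor), int(src_h * sfactor)
#       tx, ty = (dst_w - tw) / 2, (dst_h - th) / 2  # black bars fill the remainder
#       return tx, ty, tw, th
#
#   # letterbox_target(640, 480, 800, 600) -> (0, 0, 800, 600)
#   # letterbox_target(640, 480, 800, 500) -> (67, 0, 666, 500)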
def blit(self, img, pos=None,alpha=None,mask=None,alphaMask=None):
"""
**SUMMARY**
Blit aka bit blit - which in ye olden days was an acronym for bit-block transfer. In other words blit is
when you want to smash two images together, or add one image to another. This method takes in a second
SimpleCV image, and then allows you to add to some point on the calling image. A general blit command
will just copy all of the image. You can also copy the image with an alpha value so the blitted image
is semi-transparent. A binary mask can be used to blit a non-rectangular image onto the source image.
An alpha mask can be used to blit an arbitrarily transparent image onto this image. Both the mask and
alpha masks are SimpleCV Images.
**PARAMETERS**
* *img* - an image to place on top of this image.
* *pos* - an (x,y) position tuple of the top left corner of img on this image. Note that these values
can be negative.
* *alpha* - a single floating point alpha value (0=see the bottom image, 1=see just img, 0.5 blend the two 50/50).
* *mask* - a binary mask the same size as the input image. White areas are blitted, black areas are not blitted.
* *alphaMask* - an alpha mask where each grayscale value maps how much of each image is shown.
**RETURNS**
A SimpleCV Image. The size will remain the same.
**EXAMPLE**
>>> topImg = Image("top.png")
>>> bottomImg = Image("bottom.png")
>>> mask = Image("mask.png")
>>> aMask = Image("alphaMask.png")
>>> bottomImg.blit(topImg,pos=(100,100)).show()
>>> bottomImg.blit(topImg,alpha=0.5).show()
>>> bottomImg.blit(topImg,pos=(100,100),mask=mask).show()
>>> bottomImg.blit(topImg,pos=(-10,-10),alphaMask=aMask).show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
"""
retVal = Image(self.getEmpty())
cv.Copy(self.getBitmap(),retVal.getBitmap())
w = img.width
h = img.height
if( pos is None ):
pos = (0,0)
(topROI, bottomROI) = self._rectOverlapROIs((img.width,img.height),(self.width,self.height),pos)
if( alpha is not None ):
cv.SetImageROI(img.getBitmap(),topROI);
cv.SetImageROI(retVal.getBitmap(),bottomROI);
a = float(alpha)
b = float(1.00-a)
g = float(0.00)
cv.AddWeighted(img.getBitmap(),a,retVal.getBitmap(),b,g,retVal.getBitmap())
cv.ResetImageROI(img.getBitmap());
cv.ResetImageROI(retVal.getBitmap());
elif( alphaMask is not None ):
if( alphaMask is not None and (alphaMask.width != img.width or alphaMask.height != img.height ) ):
logger.warning("Image.blit: your mask and image don't match sizes, if the mask doesn't fit, you can not blit! Try using the scale function.")
return None
cImg = img.crop(topROI[0],topROI[1],topROI[2],topROI[3])
cMask = alphaMask.crop(topROI[0],topROI[1],topROI[2],topROI[3])
retValC = retVal.crop(bottomROI[0],bottomROI[1],bottomROI[2],bottomROI[3])
r = cImg.getEmpty(1)
g = cImg.getEmpty(1)
b = cImg.getEmpty(1)
cv.Split(cImg.getBitmap(), b, g, r, None)
rf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
gf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
bf=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
af=cv.CreateImage((cImg.width,cImg.height),cv.IPL_DEPTH_32F,1)
cv.ConvertScale(r,rf)
cv.ConvertScale(g,gf)
cv.ConvertScale(b,bf)
cv.ConvertScale(cMask._getGrayscaleBitmap(),af)
cv.ConvertScale(af,af,scale=(1.0/255.0))
cv.Mul(rf,af,rf)
cv.Mul(gf,af,gf)
cv.Mul(bf,af,bf)
dr = retValC.getEmpty(1)
dg = retValC.getEmpty(1)
db = retValC.getEmpty(1)
cv.Split(retValC.getBitmap(), db, dg, dr, None)
drf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
dgf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
dbf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
daf=cv.CreateImage((retValC.width,retValC.height),cv.IPL_DEPTH_32F,1)
cv.ConvertScale(dr,drf)
cv.ConvertScale(dg,dgf)
cv.ConvertScale(db,dbf)
cv.ConvertScale(cMask.invert()._getGrayscaleBitmap(),daf)
cv.ConvertScale(daf,daf,scale=(1.0/255.0))
cv.Mul(drf,daf,drf)
cv.Mul(dgf,daf,dgf)
cv.Mul(dbf,daf,dbf)
cv.Add(rf,drf,rf)
cv.Add(gf,dgf,gf)
cv.Add(bf,dbf,bf)
cv.ConvertScaleAbs(rf,r)
cv.ConvertScaleAbs(gf,g)
cv.ConvertScaleAbs(bf,b)
cv.Merge(b,g,r,None,retValC.getBitmap())
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(retValC.getBitmap(),retVal.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
elif( mask is not None):
if( mask is not None and (mask.width != img.width or mask.height != img.height ) ):
logger.warning("Image.blit: your mask and image don't match sizes, if the mask doesn't fit, you can not blit! Try using the scale function. ")
return None
cv.SetImageROI(img.getBitmap(),topROI)
cv.SetImageROI(mask.getBitmap(),topROI)
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(img.getBitmap(),retVal.getBitmap(),mask.getBitmap())
cv.ResetImageROI(img.getBitmap())
cv.ResetImageROI(mask.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
else: #vanilla blit
cv.SetImageROI(img.getBitmap(),topROI)
cv.SetImageROI(retVal.getBitmap(),bottomROI)
cv.Copy(img.getBitmap(),retVal.getBitmap())
cv.ResetImageROI(img.getBitmap())
cv.ResetImageROI(retVal.getBitmap())
return retVal
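# The alpha and alphaMask branches above implement the classic per-channel
# "over" blend, out = fg * a + bg * (1 - a). A hedged numpy sketch of the same
# idea (hypothetical helper, not part of the original API):
#
#   import numpy as np
#   def alpha_blend(fg, bg, alpha):
#       """fg, bg: uint8 arrays of the same shape; alpha: scalar in [0, 1]."""
#       out = fg.astype(np.float32) * alpha + bg.astype(np.float32) * (1.0 - alpha)
#       return np.clip(out, 0, 255).astype(np.uint8)
#
#   # The alphaMask branch does the same thing per pixel, using the mask's
#   # grayscale values scaled by 1/255 as the alpha term.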
def sideBySide(self, image, side="right", scale=True ):
"""
**SUMMARY**
Combine two images as a side by side images. Great for before and after images.
**PARAMETERS**
* *side* - what side of this image to place the other image on.
choices are ('left'/'right'/'top'/'bottom').
* *scale* - if true scale the smaller of the two sides to match the
edge touching the other image. If false we center the smaller
of the two images on the edge touching the larger image.
**RETURNS**
A new image that is a combination of the two images.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = Image("orson_welles.jpg")
>>> img3 = img.sideBySide(img2)
**TODO**
Make this accept a list of images.
"""
#there is probably a cleaner way to do this, but I know I hit every case when they are enumerated
retVal = None
if( side == "top" ):
#clever
retVal = image.sideBySide(self,"bottom",scale)
elif( side == "bottom" ):
if( self.width > image.width ):
if( scale ):
#scale the other image width to fit
resized = image.resize(w=self.width)
nW = self.width
nH = self.height + resized.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,nW,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,self.height,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = self.width
nH = self.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,nW,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
xc = (self.width-image.width)/2
cv.SetImageROI(newCanvas,(xc,self.height,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else: #our width is smaller than the other image
if( scale ):
#scale the other image width to fit
resized = self.resize(w=image.width)
nW = image.width
nH = resized.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,resized.height,nW,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = image.width
nH = self.height + image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
xc = (image.width - self.width)/2
cv.SetImageROI(newCanvas,(xc,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(0,self.height,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
elif( side == "right" ):
retVal = image.sideBySide(self,"left",scale)
else: #default to left
if( self.height > image.height ):
if( scale ):
#scale the other image height to fit
resized = image.resize(h=self.height)
nW = self.width + resized.width
nH = self.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(resized.width,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = self.width+image.width
nH = self.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
yc = (self.height-image.height)/2
cv.SetImageROI(newCanvas,(0,yc,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(image.width,0,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else: #our height is smaller than the other image
if( scale ):
#scale our height to fit
resized = self.resize(h=image.height)
nW = image.width + resized.width
nH = image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.SetImageROI(newCanvas,(image.width,0,resized.width,resized.height))
cv.Copy(resized.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
else:
nW = image.width + self.width
nH = image.height
newCanvas = cv.CreateImage((nW,nH), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
cv.SetImageROI(newCanvas,(0,0,image.width,image.height))
cv.Copy(image.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
yc = (image.height-self.height)/2
cv.SetImageROI(newCanvas,(image.width,yc,self.width,self.height))
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
retVal = Image(newCanvas,colorSpace=self._colorSpace)
return retVal
def embiggen(self, size=None, color=Color.BLACK, pos=None):
"""
**SUMMARY**
Make the canvas larger but keep the image the same size.
**PARAMETERS**
* *size* - a (width, height) tuple for the new canvas, or a single value by which to scale the canvas size; for instance size=2 would make the image canvas twice the size.
* *color* - the color of the canvas
* *pos* - the position of the top left corner of image on the new canvas,
if none the image is centered.
**RETURNS**
The enlarged SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img = img.embiggen((1024,1024),color=Color.BLUE)
>>> img.show()
"""
if not isinstance(size, tuple) and size > 1:
size = (self.width * size, self.height * size)
if( size == None or size[0] < self.width or size[1] < self.height ):
logger.warning("image.embiggenCanvas: the size provided is invalid")
return None
newCanvas = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
newColor = cv.RGB(color[0],color[1],color[2])
cv.AddS(newCanvas,newColor,newCanvas)
topROI = None
bottomROI = None
if( pos is None ):
pos = (((size[0]-self.width)/2),((size[1]-self.height)/2))
(topROI, bottomROI) = self._rectOverlapROIs((self.width,self.height),size,pos)
if( topROI is None or bottomROI is None):
logger.warning("image.embiggenCanvas: the position of the old image doesn't make sense, there is no overlap")
return None
cv.SetImageROI(newCanvas, bottomROI)
cv.SetImageROI(self.getBitmap(),topROI)
cv.Copy(self.getBitmap(),newCanvas)
cv.ResetImageROI(newCanvas)
cv.ResetImageROI(self.getBitmap())
return Image(newCanvas)
def _rectOverlapROIs(self,top, bottom, pos):
"""
top is a rectangle (w,h)
bottom is a rectangle (w,h)
pos is the top left corner of the top rectangle with respect to the bottom rectangle's top left corner
the method returns (None, None) if the two rectangles do not overlap. Otherwise it returns the top rectangle's ROI (x,y,w,h)
and the bottom rectangle's ROI (x,y,w,h)
"""
# the position of the top rect's corners, given that the bottom rect's top left = (0,0)
tr = (pos[0]+top[0],pos[1])
tl = pos
br = (pos[0]+top[0],pos[1]+top[1])
bl = (pos[0],pos[1]+top[1])
# do an overlap test to weed out corner cases and errors
def inBounds((w,h), (x,y)):
retVal = True
if( x < 0 or y < 0 or x > w or y > h):
retVal = False
return retVal
trc = inBounds(bottom,tr)
tlc = inBounds(bottom,tl)
brc = inBounds(bottom,br)
blc = inBounds(bottom,bl)
if( not trc and not tlc and not brc and not blc ): # no overlap
return None,None
elif( trc and tlc and brc and blc ): # easy case top is fully inside bottom
tRet = (0,0,top[0],top[1])
bRet = (pos[0],pos[1],top[0],top[1])
return tRet,bRet
# let's figure out where the top rectangle sits on the bottom
# we clamp the corners of the top rectangle to live inside
# the bottom rectangle and from that get the x,y,w,h
tl = (np.clip(tl[0],0,bottom[0]),np.clip(tl[1],0,bottom[1]))
br = (np.clip(br[0],0,bottom[0]),np.clip(br[1],0,bottom[1]))
bx = tl[0]
by = tl[1]
bw = abs(tl[0]-br[0])
bh = abs(tl[1]-br[1])
# now let's figure where the bottom rectangle is in the top rectangle
# we do the same thing with different coordinates
pos = (-1*pos[0], -1*pos[1])
#recalculate the bottoms's corners with respect to the top.
tr = (pos[0]+bottom[0],pos[1])
tl = pos
br = (pos[0]+bottom[0],pos[1]+bottom[1])
bl = (pos[0],pos[1]+bottom[1])
tl = (np.clip(tl[0],0,top[0]), np.clip(tl[1],0,top[1]))
br = (np.clip(br[0],0,top[0]), np.clip(br[1],0,top[1]))
tx = tl[0]
ty = tl[1]
tw = abs(br[0]-tl[0])
th = abs(br[1]-tl[1])
return (tx,ty,tw,th),(bx,by,bw,bh)
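# A quick hedged illustration of _rectOverlapROIs: place a 50x50 "top" rectangle
# at pos=(-10, -10) on a 100x100 "bottom" rectangle. Only a 40x40 region overlaps,
# so the method should return
#
#   topROI    == (10, 10, 40, 40)  # the part of the top rect that lands on the bottom
#   bottomROI == (0, 0, 40, 40)    # where that part sits on the bottom rect
#
# which is exactly the pair blit() and embiggen() use to set their image ROIs.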
def createBinaryMask(self,color1=(0,0,0),color2=(255,255,255)):
"""
**SUMMARY**
Generate a binary mask of the image based on a range of rgb values.
A binary mask is a black and white image where the white area is kept and the
black area is removed.
This method is used by specifying two colors as the range between the minimum and maximum
values that will be masked white.
**PARAMETERS**
* *color1* - The start of the color range for the mask.
* *color2* - The end of the color range for the mask.
**RETURNS**
A binary (black/white) image mask as a SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.createBinaryMask(color1=(0,128,128),color2=(255,255,255))
>>> mask.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
if( color1[0]-color2[0] == 0 or
color1[1]-color2[1] == 0 or
color1[2]-color2[2] == 0 ):
logger.warning("No color range selected, the result will be black, returning None instead.")
return None
if( color1[0] > 255 or color1[0] < 0 or
color1[1] > 255 or color1[1] < 0 or
color1[2] > 255 or color1[2] < 0 or
color2[0] > 255 or color2[0] < 0 or
color2[1] > 255 or color2[1] < 0 or
color2[2] > 255 or color2[2] < 0 ):
logger.warning("One of the tuple values falls outside of the range of 0 to 255")
return None
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
rl = self.getEmpty(1)
gl = self.getEmpty(1)
bl = self.getEmpty(1)
rh = self.getEmpty(1)
gh = self.getEmpty(1)
bh = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None);
#the difference == 255 case is where open CV
#kinda screws up, this should just be a white image
if( abs(color1[0]-color2[0]) == 255 ):
cv.Zero(rl)
cv.AddS(rl,255,rl)
#there is a corner case here where difference == 0
#right now we throw an error on this case.
#also we use the triplets directly as OpenCV is
# SUPER FINICKY about the type of the threshold.
elif( color1[0] < color2[0] ):
cv.Threshold(r,rl,color1[0],255,cv.CV_THRESH_BINARY)
cv.Threshold(r,rh,color2[0],255,cv.CV_THRESH_BINARY)
cv.Sub(rl,rh,rl)
else:
cv.Threshold(r,rl,color2[0],255,cv.CV_THRESH_BINARY)
cv.Threshold(r,rh,color1[0],255,cv.CV_THRESH_BINARY)
cv.Sub(rl,rh,rl)
if( abs(color1[1]-color2[1]) == 255 ):
cv.Zero(gl)
cv.AddS(gl,255,gl)
elif( color1[1] < color2[1] ):
cv.Threshold(g,gl,color1[1],255,cv.CV_THRESH_BINARY)
cv.Threshold(g,gh,color2[1],255,cv.CV_THRESH_BINARY)
cv.Sub(gl,gh,gl)
else:
cv.Threshold(g,gl,color2[1],255,cv.CV_THRESH_BINARY)
cv.Threshold(g,gh,color1[1],255,cv.CV_THRESH_BINARY)
cv.Sub(gl,gh,gl)
if( abs(color1[2]-color2[2]) == 255 ):
cv.Zero(bl)
cv.AddS(bl,255,bl)
elif( color1[2] < color2[2] ):
cv.Threshold(b,bl,color1[2],255,cv.CV_THRESH_BINARY)
cv.Threshold(b,bh,color2[2],255,cv.CV_THRESH_BINARY)
cv.Sub(bl,bh,bl)
else:
cv.Threshold(b,bl,color2[2],255,cv.CV_THRESH_BINARY)
cv.Threshold(b,bh,color1[2],255,cv.CV_THRESH_BINARY)
cv.Sub(bl,bh,bl)
cv.And(rl,gl,rl)
cv.And(rl,bl,rl)
return Image(rl)
def applyBinaryMask(self, mask,bg_color=Color.BLACK):
"""
**SUMMARY**
Apply a binary mask to the image. The white areas of the mask will be kept,
and the black areas removed. The removed areas will be set to the color of
bg_color.
**PARAMETERS**
* *mask* - the binary mask image. White areas are kept, black areas are removed.
* *bg_color* - the color of the background on the mask.
**RETURNS**
The masked image as a SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.createBinaryMask(color1=(0,128,128),color2=(255,255,255))
>>> result = img.applyBinaryMask(mask)
>>> result.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`applyBinaryMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
newCanvas = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_8U, 3)
cv.SetZero(newCanvas)
newBG = cv.RGB(bg_color[0],bg_color[1],bg_color[2])
cv.AddS(newCanvas,newBG,newCanvas)
if( mask.width != self.width or mask.height != self.height ):
logger.warning("Image.applyBinaryMask: your mask and image don't match sizes, if the mask doesn't fit, you can't apply it! Try using the scale function. ")
return None
cv.Copy(self.getBitmap(),newCanvas,mask.getBitmap());
return Image(newCanvas,colorSpace=self._colorSpace);
def createAlphaMask(self, hue=60, hue_lb=None,hue_ub=None):
"""
**SUMMARY**
Generate a grayscale or binary mask image based either on a hue or an RGB triplet that can be used
like an alpha channel. In the resulting mask, the hue/rgb_color will be treated as transparent (black).
When a hue is used the mask is treated like an 8bit alpha channel.
When an RGB triplet is used the result is a binary mask.
Note that only the hue-based form is exposed through this method's parameters; for an RGB range mask see createBinaryMask.
**PARAMETERS**
* *hue* - a hue used to generate the alpha mask.
* *hue_lb* - the lower value of a range of hue values to use.
* *hue_ub* - the upper value of a range of hue values to use.
**RETURNS**
A grayscale alpha mask as a SimpleCV Image.
>>> img = Image("lenna")
>>> mask = img.createAlphaMask(hue_lb=50,hue_ub=70)
>>> mask.show()
**SEE ALSO**
:py:meth:`createBinaryMask`
:py:meth:`createAlphaMask`
:py:meth:`applyBinaryMask`
:py:meth:`blit`
:py:meth:`threshold`
"""
if( hue<0 or hue > 180 ):
logger.warning("Invalid hue color, valid hue range is 0 to 180.")
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
else:
hsv = self
h = hsv.getEmpty(1)
s = hsv.getEmpty(1)
retVal = hsv.getEmpty(1)
mask = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),h,None,s,None)
hlut = np.zeros((256,1),dtype=uint8) #thankfully we're not doing a LUT on saturation
if(hue_lb is not None and hue_ub is not None):
hlut[hue_lb:hue_ub]=255
else:
hlut[hue] = 255
cv.LUT(h,mask,cv.fromarray(hlut))
cv.Copy(s,retVal,mask) #we'll save memory using hue
return Image(retVal)
def applyPixelFunction(self, theFunc):
"""
**SUMMARY**
apply a function to every pixel and return the result
The function must be of the form int (r,g,b)=func((r,g,b))
**PARAMETERS**
* *theFunc* - a function pointer to a function of the form (r,g,b) = theFunc((r,g,b))
**RETURNS**
A simpleCV image after mapping the function to the image.
**EXAMPLE**
>>> def derp(pixel):
>>> r,g,b = pixel
>>> return (int(b*.2),int(r*.3),int(g*.5))
>>>
>>> img = Image("lenna")
>>> img2 = img.applyPixelFunction(derp)
"""
#there should be a way to do this faster using numpy vectorize
#but I can't get vectorize to work with the three channels together... have to split them
#TODO: benchmark this against vectorize
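# A possible vectorized alternative (untested sketch, not the library's implementation):
#   arr = self.getNumpy().reshape(-1, 3)
#   out = np.apply_along_axis(theFunc, 1, arr).astype(np.uint8).reshape(self.width, self.height, 3)
# This avoids the Python-level map() but still calls theFunc once per pixel.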
pixels = np.array(self.getNumpy()).reshape(-1,3).tolist()
result = np.array(map(theFunc,pixels),dtype=uint8).reshape(self.width,self.height,3)
return Image(result)
def integralImage(self,tilted=False):
"""
**SUMMARY**
Calculate the integral image and return it as a numpy array.
The integral image gives the sum of all of the pixels above and to the
left of a given pixel location. It is useful for computing Haar cascades.
The return type is a numpy array the same size of the image. The integral
image requires 32Bit values which are not easily supported by the SimpleCV
Image class.
**PARAMETERS**
* *tilted* - if tilted is true we tilt the image 45 degrees and then calculate the results.
**RETURNS**
A numpy array of the values.
**EXAMPLE**
>>> img = Image("logo")
>>> derp = img.integralImage()
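The sum over any rectangular region can then be read off with four lookups; the corner
indices below are hypothetical (the returned array is indexed [y, x]):
>>> ii = img.integralImage()
>>> region_sum = ii[y1,x1] - ii[y0,x1] - ii[y1,x0] + ii[y0,x0]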
**SEE ALSO**
http://en.wikipedia.org/wiki/Summed_area_table
"""
if(tilted):
img2 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
img3 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
cv.Integral(self._getGrayscaleBitmap(),img3,None,img2)
else:
img2 = cv.CreateImage((self.width+1, self.height+1), cv.IPL_DEPTH_32F, 1)
cv.Integral(self._getGrayscaleBitmap(),img2)
return np.array(cv.GetMat(img2))
def convolve(self,kernel = [[1,0,0],[0,1,0],[0,0,1]],center=None):
"""
**SUMMARY**
Convolution filters the image with a kernel; depending on the kernel this can blur,
sharpen, or emphasize edges. You pass it a kernel in the form of a list, np.array, or cvMat
**PARAMETERS**
* *kernel* - The convolution kernel. As a cvArray, cvMat, or Numpy Array.
* *center* - An optional (x,y) anchor point within the kernel; if None the kernel's center is used.
**RETURNS**
The image after we apply the convolution.
**EXAMPLE**
>>> img = Image("sampleimages/simplecv.png")
>>> kernel = [[1,0,0],[0,1,0],[0,0,1]]
>>> conv = img.convolve(kernel)
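A 3x3 box blur is one common kernel choice (illustrative sketch):
>>> blur = img.convolve(np.ones((3,3))/9.0)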
**SEE ALSO**
http://en.wikipedia.org/wiki/Convolution
"""
if(isinstance(kernel, list)):
kernel = np.array(kernel)
if(type(kernel)==np.ndarray):
sz = kernel.shape
kernel = kernel.astype(np.float32)
myKernel = cv.CreateMat(sz[0], sz[1], cv.CV_32FC1)
cv.SetData(myKernel, kernel.tostring(), kernel.dtype.itemsize * kernel.shape[1])
elif(type(kernel)==cv.mat):
myKernel = kernel
else:
logger.warning("Convolution uses numpy arrays or cv.mat type.")
return None
retVal = self.getEmpty(3)
if(center is None):
cv.Filter2D(self.getBitmap(),retVal,myKernel)
else:
cv.Filter2D(self.getBitmap(),retVal,myKernel,center)
return Image(retVal)
def findTemplate(self, template_image = None, threshold = 5, method = "SQR_DIFF_NORM", grayscale=True, rawmatches = False):
"""
**SUMMARY**
This function searches an image for a template image. The template
image is a smaller image that is searched for in the bigger image.
This is a basic pattern finder in an image. This uses the standard
OpenCV template (pattern) matching and cannot handle scaling or rotation
Template matching returns a match score for every pixel in the image.
Often pixels that are near to each other and a close match to the template
are returned as a match. If the threshold is set too low expect to get
a huge number of values. The threshold parameter is in terms of the
number of standard deviations from the mean match value you are looking for.
For example, matches that are above three standard deviations will return
roughly 0.1% of the pixels. In an 800x600 image this means there will be
800*600*0.001 = 480 matches.
This method returns the locations of wherever it finds a match above a
threshold. Because of how template matching works, very often multiple
instances of the template overlap significantly. The best approach is to
find the centroid of all of these values. We suggest using an iterative
k-means approach to find the centroids.
**PARAMETERS**
* *template_image* - The template image.
* *threshold* - Int
* *method* -
* SQR_DIFF_NORM - Normalized square difference
* SQR_DIFF - Square difference
* CCOEFF -
* CCOEFF_NORM -
* CCORR - Cross correlation
* CCORR_NORM - Normalized cross correlation
* *grayscale* - Boolean - If false, template Match is found using BGR image.
**EXAMPLE**
>>> image = Image("/path/to/img.png")
>>> pattern_image = image.crop(100,100,100,100)
>>> found_patterns = image.findTemplate(pattern_image)
>>> found_patterns.draw()
>>> image.show()
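To reduce the overlapping matches to one location per object you could cluster the
match coordinates; the sketch below is illustrative and assumes two objects and that
each TemplateMatch exposes its location via coordinates():
>>> import scipy.cluster.vq as scv
>>> pts = np.array([f.coordinates() for f in found_patterns], dtype=np.float32)
>>> centroids, _ = scv.kmeans(pts, 2)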
**RETURNS**
This method returns a FeatureSet of TemplateMatch objects.
"""
if(template_image == None):
logger.info( "Need image for matching")
return
if(template_image.width > self.width):
#logger.info( "Image too wide")
return
if(template_image.height > self.height):
logger.info("Image too tall")
return
check = 0; # if check = 0 we want maximal value, otherwise minimal
if(method is None or method == "" or method == "SQR_DIFF_NORM"):#minimal
method = cv.CV_TM_SQDIFF_NORMED
check = 1;
elif(method == "SQR_DIFF"): #minimal
method = cv.CV_TM_SQDIFF
check = 1
elif(method == "CCOEFF"): #maximal
method = cv.CV_TM_CCOEFF
elif(method == "CCOEFF_NORM"): #maximal
method = cv.CV_TM_CCOEFF_NORMED
elif(method == "CCORR"): #maximal
method = cv.CV_TM_CCORR
elif(method == "CCORR_NORM"): #maximal
method = cv.CV_TM_CCORR_NORMED
else:
logger.warning("ooops.. I don't know what template matching method you are looking for.")
return None
#create new image for template matching computation
matches = cv.CreateMat( (self.height - template_image.height + 1),
(self.width - template_image.width + 1),
cv.CV_32FC1)
#choose template matching method to be used
if grayscale:
cv.MatchTemplate( self._getGrayscaleBitmap(), template_image._getGrayscaleBitmap(), matches, method )
else:
cv.MatchTemplate( self.getBitmap(), template_image.getBitmap(), matches, method )
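# convert the raw match surface into a statistical cutoff: keep locations that score
# more than `threshold` standard deviations better than the mean (below the mean for
# SQDIFF-style methods, above it for the correlation methods)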
mean = np.mean(matches)
sd = np.std(matches)
if(check > 0):
compute = np.where((matches < mean-threshold*sd) )
else:
compute = np.where((matches > mean+threshold*sd) )
mapped = map(tuple, np.column_stack(compute))
fs = FeatureSet()
for location in mapped:
fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]]))
if (rawmatches):
return fs
#cluster overlapping template matches
finalfs = FeatureSet()
if( len(fs) > 0 ):
finalfs.append(fs[0])
for f in fs:
match = False
for f2 in finalfs:
if( f2._templateOverlaps(f) ): #if they overlap
f2.consume(f) #merge them
match = True
break
if( not match ):
finalfs.append(f)
for f in finalfs: #rescale the resulting clusters to fit the template size
f.rescale(template_image.width,template_image.height)
fs = finalfs
return fs
def findTemplateOnce(self, template_image = None, threshold = 0.2, method = "SQR_DIFF_NORM", grayscale=True):
"""
**SUMMARY**
This function searches an image for a single template image match.The template
image is a smaller image that is searched for in the bigger image.
This is a basic pattern finder in an image. This uses the standard
OpenCV template (pattern) matching and cannot handle scaling or rotation
This method returns the single best match if and only if that
match is less than the threshold (or greater than, for methods where
a higher score is better).
**PARAMETERS**
* *template_image* - The template image.
* *threshold* - Int
* *method* -
* SQR_DIFF_NORM - Normalized square difference
* SQR_DIFF - Square difference
* CCOEFF -
* CCOEFF_NORM -
* CCORR - Cross correlation
* CCORR_NORM - Normalized cross correlation
* *grayscale* - Boolean - If false, template Match is found using BGR image.
**EXAMPLE**
>>> image = Image("/path/to/img.png")
>>> pattern_image = image.crop(100,100,100,100)
>>> found_patterns = image.findTemplateOnce(pattern_image)
>>> found_patterns.draw()
>>> image.show()
**RETURNS**
This method returns a FeatureSet of TemplateMatch objects.
"""
if(template_image == None):
logger.info( "Need image for template matching.")
return
if(template_image.width > self.width):
logger.info( "Template image is too wide for the given image.")
return
if(template_image.height > self.height):
logger.info("Template image too tall for the given image.")
return
check = 0; # if check = 0 we want maximal value, otherwise minimal
if(method is None or method == "" or method == "SQR_DIFF_NORM"):#minimal
method = cv.CV_TM_SQDIFF_NORMED
check = 1;
elif(method == "SQR_DIFF"): #minimal
method = cv.CV_TM_SQDIFF
check = 1
elif(method == "CCOEFF"): #maximal
method = cv.CV_TM_CCOEFF
elif(method == "CCOEFF_NORM"): #maximal
method = cv.CV_TM_CCOEFF_NORMED
elif(method == "CCORR"): #maximal
method = cv.CV_TM_CCORR
elif(method == "CCORR_NORM"): #maximal
method = cv.CV_TM_CCORR_NORMED
else:
logger.warning("ooops.. I don't know what template matching method you are looking for.")
return None
#create new image for template matching computation
matches = cv.CreateMat( (self.height - template_image.height + 1),
(self.width - template_image.width + 1),
cv.CV_32FC1)
#choose template matching method to be used
if grayscale:
cv.MatchTemplate( self._getGrayscaleBitmap(), template_image._getGrayscaleBitmap(), matches, method )
else:
cv.MatchTemplate( self.getBitmap(), template_image.getBitmap(), matches, method )
mean = np.mean(matches)
sd = np.std(matches)
if(check > 0):
if( np.min(matches) <= threshold ):
compute = np.where( matches == np.min(matches) )
else:
return []
else:
if( np.max(matches) >= threshold ):
compute = np.where( matches == np.max(matches) )
else:
return []
mapped = map(tuple, np.column_stack(compute))
fs = FeatureSet()
for location in mapped:
fs.append(TemplateMatch(self, template_image, (location[1],location[0]), matches[location[0], location[1]]))
return fs
def readText(self):
"""
**SUMMARY**
This function will return any text it can find using OCR on the
image.
Please note that it does not handle rotation well, so if you need
it in your application try to rotate and/or crop the area so that
the text is oriented the same way a document is read.
**RETURNS**
A String
**EXAMPLE**
>>> img = Image("somethingwithtext.png")
>>> text = img.readText()
>>> print text
**NOTE**
If you're having run-time problems I feel bad for your son,
I've got 99 problems but dependencies ain't one:
http://code.google.com/p/tesseract-ocr/
http://code.google.com/p/python-tesseract/
"""
if(not OCR_ENABLED):
return "Please install the correct OCR library required - http://code.google.com/p/tesseract-ocr/ http://code.google.com/p/python-tesseract/"
api = tesseract.TessBaseAPI()
api.SetOutputName("outputName")
api.Init(".","eng",tesseract.OEM_DEFAULT)
api.SetPageSegMode(tesseract.PSM_AUTO)
jpgdata = StringIO()
self.getPIL().save(jpgdata, "jpeg")
jpgdata.seek(0)
stringbuffer = jpgdata.read()
result = tesseract.ProcessPagesBuffer(stringbuffer,len(stringbuffer),api)
return result
def findCircle(self,canny=100,thresh=350,distance=-1):
"""
**SUMMARY**
Perform the Hough Circle transform to extract _perfect_ circles from the image.
**PARAMETERS**
* *canny* - the upper bound on a canny edge detector used to find circle edges.
* *thresh* - the threshold at which to count a circle. Small parts of a circle get
added to the accumulator array used internally by the algorithm. This value is the
minimum threshold. Lower thresholds give more circles, higher thresholds give fewer circles.
.. Warning::
If this threshold is too high, and no circles are found the underlying OpenCV
routine fails and causes a segfault.
* *distance* - the minimum distance between each successive circle in pixels. 10 is a good
starting value.
**RETURNS**
A feature set of Circle objects.
**EXAMPLE**
>>> img = Image("lenna")
>>> circs = img.findCircle()
>>> for c in circs:
>>> print c
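The detected circles can also be drawn back onto the image (illustrative):
>>> if circs is not None:
>>> circs.draw(width=2)
>>> img.show()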
"""
storage = cv.CreateMat(self.width, 1, cv.CV_32FC3)
#a distance metric for how far apart our circles should be - this is a good benchmark
if(distance < 0 ):
distance = 1 + max(self.width,self.height)/50
cv.HoughCircles(self._getGrayscaleBitmap(),storage, cv.CV_HOUGH_GRADIENT, 2, distance,canny,thresh)
if storage.rows == 0:
return None
circs = np.asarray(storage)
sz = circs.shape
circleFS = FeatureSet()
for i in range(sz[0]):
circleFS.append(Circle(self,int(circs[i][0][0]),int(circs[i][0][1]),int(circs[i][0][2])))
return circleFS
def whiteBalance(self,method="Simple"):
"""
**SUMMARY**
Attempts to perform automatic white balancing.
Gray World see: http://scien.stanford.edu/pages/labsite/2000/psych221/projects/00/trek/GWimages.html
Robust AWB: http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/robustawb.html
http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/Papers/Robust%20Automatic%20White%20Balance%20Algorithm%20using%20Gray%20Color%20Points%20in%20Images.pdf
Simple AWB:
http://www.ipol.im/pub/algo/lmps_simplest_color_balance/
http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/simplestcb.html
**PARAMETERS**
* *method* - The method to use for white balancing. Can be one of the following:
* `Gray World <http://scien.stanford.edu/pages/labsite/2000/psych221/projects/00/trek/GWimages.html>`_
* `Robust AWB <http://scien.stanford.edu/pages/labsite/2010/psych221/projects/2010/JasonSu/robustawb.html>`_
* `Simple AWB <http://www.ipol.im/pub/algo/lmps_simplest_color_balance/>`_
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.whiteBalance()
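The Gray World method can be selected explicitly (illustrative):
>>> img3 = img.whiteBalance(method="GrayWorld")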
"""
img = self
if(method=="GrayWorld"):
avg = cv.Avg(img.getBitmap());
bf = float(avg[0])
gf = float(avg[1])
rf = float(avg[2])
af = (bf+gf+rf)/3.0
if( bf == 0.00 ):
b_factor = 1.00
else:
b_factor = af/bf
if( gf == 0.00 ):
g_factor = 1.00
else:
g_factor = af/gf
if( rf == 0.00 ):
r_factor = 1.00
else:
r_factor = af/rf
b = img.getEmpty(1)
g = img.getEmpty(1)
r = img.getEmpty(1)
cv.Split(self.getBitmap(), b, g, r, None)
bfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
gfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
rfloat = cv.CreateImage((img.width, img.height), cv.IPL_DEPTH_32F, 1)
cv.ConvertScale(b,bfloat,b_factor)
cv.ConvertScale(g,gfloat,g_factor)
cv.ConvertScale(r,rfloat,r_factor)
(minB,maxB,minBLoc,maxBLoc) = cv.MinMaxLoc(bfloat)
(minG,maxG,minGLoc,maxGLoc) = cv.MinMaxLoc(gfloat)
(minR,maxR,minRLoc,maxRLoc) = cv.MinMaxLoc(rfloat)
scale = max([maxR,maxG,maxB])
sfactor = 1.00
if(scale > 255 ):
sfactor = 255.00/float(scale)
cv.ConvertScale(bfloat,b,sfactor);
cv.ConvertScale(gfloat,g,sfactor);
cv.ConvertScale(rfloat,r,sfactor);
retVal = img.getEmpty()
cv.Merge(b,g,r,None,retVal);
retVal = Image(retVal)
elif( method == "Simple" ):
thresh = 0.003
sz = img.width*img.height
tempMat = img.getNumpy()
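# Simplest color balance: for each channel, find the intensity levels below and above
# which the bottom/top `thresh` fraction of pixels fall (via the cumulative histogram),
# then build a LUT that clips those tails to 0/255 and linearly stretches the rest.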
bcf = sss.cumfreq(tempMat[:,:,0], numbins=256)
bcf = bcf[0] # get our cumulative histogram of values for this color
blb = -1 #our lower bound
bub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find the cut points below/above which thresh% of our values live
while( lower_thresh < thresh ):
blb = blb+1
lower_thresh = bcf[blb]/sz
while( upper_thresh < thresh ):
bub = bub-1
upper_thresh = (sz-bcf[bub])/sz
gcf = sss.cumfreq(tempMat[:,:,1], numbins=256)
gcf = gcf[0]
glb = -1 #our lower bound
gub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find the cut points below/above which thresh% of our values live
while( lower_thresh < thresh ):
glb = glb+1
lower_thresh = gcf[glb]/sz
while( upper_thresh < thresh ):
gub = gub-1
upper_thresh = (sz-gcf[gub])/sz
rcf = sss.cumfreq(tempMat[:,:,2], numbins=256)
rcf = rcf[0]
rlb = -1 #our lower bound
rub = 256 # our upper bound
lower_thresh = 0.00
upper_thresh = 0.00
#now find the cut points below/above which thresh% of our values live
while( lower_thresh < thresh ):
rlb = rlb+1
lower_thresh = rcf[rlb]/sz
while( upper_thresh < thresh ):
rub = rub-1
upper_thresh = (sz-rcf[rub])/sz
#now we create the scale factors for the remaining pixels
rlbf = float(rlb)
rubf = float(rub)
glbf = float(glb)
gubf = float(gub)
blbf = float(blb)
bubf = float(bub)
rLUT = np.ones((256,1),dtype=uint8)
gLUT = np.ones((256,1),dtype=uint8)
bLUT = np.ones((256,1),dtype=uint8)
for i in range(256):
if(i <= rlb):
rLUT[i][0] = 0
elif( i >= rub):
rLUT[i][0] = 255
else:
rf = ((float(i)-rlbf)*255.00/(rubf-rlbf))
rLUT[i][0] = int(rf)
if( i <= glb):
gLUT[i][0] = 0
elif( i >= gub):
gLUT[i][0] = 255
else:
gf = ((float(i)-glbf)*255.00/(gubf-glbf))
gLUT[i][0] = int(gf)
if( i <= blb):
bLUT[i][0] = 0
elif( i >= bub):
bLUT[i][0] = 255
else:
bf = ((float(i)-blbf)*255.00/(bubf-blbf))
bLUT[i][0] = int(bf)
retVal = img.applyLUT(bLUT,rLUT,gLUT)
return retVal
def applyLUT(self,rLUT=None,bLUT=None,gLUT=None):
"""
**SUMMARY**
Apply LUT allows you to apply a LUT (look up table) to the pixels in a image. Each LUT is just
an array where each index in the array points to its value in the result image. For example
rLUT[0]=255 would change all pixels where the red channel is zero to the value 255.
**PARAMETERS**
* *rLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
* *gLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
* *bLUT* - a tuple or np.array of size (256x1) with dtype=uint8.
.. warning::
The dtype is very important. Will throw the following error without it:
error: dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels())
**RETURNS**
The SimpleCV image remapped using the LUT.
**EXAMPLE**
This example saturates the red channel:
>>> rlut = np.ones((256,1),dtype=uint8)*255
>>> img=img.applyLUT(rLUT=rlut)
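An inversion LUT for the green channel could be built the same way (illustrative sketch):
>>> glut = np.array([255-i for i in range(256)], dtype=uint8).reshape(256,1)
>>> img = img.applyLUT(gLUT=glut)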
NOTE:
-==== BUG NOTE ====-
This method seems to error on the LUT map for some versions of OpenCV.
I am trying to figure out why. -KAS
"""
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None);
if(rLUT is not None):
cv.LUT(r,r,cv.fromarray(rLUT))
if(gLUT is not None):
cv.LUT(g,g,cv.fromarray(gLUT))
if(bLUT is not None):
cv.LUT(b,b,cv.fromarray(bLUT))
temp = self.getEmpty()
cv.Merge(b,g,r,None,temp)
return Image(temp)
def _getRawKeypoints(self,thresh=500.00,flavor="SURF", highQuality=1, forceReset=False):
"""
.. _getRawKeypoints:
This method finds keypoints in an image and returns them as the raw keypoints
and keypoint descriptors. When this method is called it caches the features
and keypoints locally for quick and easy access.
Parameters:
thresh - The minimum quality metric for SURF descriptors. Good values
range between about 300.00 and 600.00
flavor - a string indicating the method to use to extract features.
A good primer on how feature/keypoint extractors work can be found here:
http://en.wikipedia.org/wiki/Feature_detection_(computer_vision)
http://www.cg.tu-berlin.de/fileadmin/fg144/Courses/07WS/compPhoto/Feature_Detection.pdf
"SURF" - extract the SURF features and descriptors. If you don't know
what to use, use this.
See: http://en.wikipedia.org/wiki/SURF
"STAR" - The STAR feature extraction algorithm
See: http://pr.willowgarage.com/wiki/Star_Detector
"FAST" - The FAST keypoint extraction algorithm
See: http://en.wikipedia.org/wiki/Corner_detection#AST_based_feature_detectors
All the flavour specified below are for OpenCV versions >= 2.4.0 :
"MSER" - Maximally Stable Extremal Regions algorithm
See: http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions
"Dense" - Dense Scale Invariant Feature Transform.
See: http://www.vlfeat.org/api/dsift.html
"ORB" - The Oriented FAST and Rotated BRIEF
See: http://www.willowgarage.com/sites/default/files/orb_final.pdf
"SIFT" - Scale-invariant feature transform
See: http://en.wikipedia.org/wiki/Scale-invariant_feature_transform
"BRISK" - Binary Robust Invariant Scalable Keypoints
See: http://www.asl.ethz.ch/people/lestefan/personal/BRISK
"FREAK" - Fast Retina Keypoints
See: http://www.ivpe.com/freak.htm
Note: It's a keypoint descriptor and not a KeyPoint detector. SIFT KeyPoints
are detected and FREAK is used to extract the keypoint descriptors.
highQuality - The SURF descriptor comes in two forms, a vector of 64 descriptor
values and a vector of 128 descriptor values. The latter are "high"
quality descriptors.
forceReset - If keypoints have already been calculated for this image those
keypoints are returned rather than recalculated. If
forceReset is True we always recalculate the values, otherwise
we will use the cached copies.
Returns:
A tuple of keypoint objects and optionally a numpy array of the descriptors.
Example:
>>> img = Image("aerospace.jpg")
>>> kp,d = img._getRawKeypoints()
Notes:
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
See Also:
ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
ImageClass._getFLANNMatches(self,sd,td)
ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
"""
try:
import cv2
ver = cv2.__version__
new_version = 0
#For OpenCV versions up to 2.4.0, cv2.__version__ is of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:'):
if int(ver.replace('.','0'))>=20400:
new_version = 1
except:
warnings.warn("Can't run Keypoints without OpenCV >= 2.3.0")
return (None, None)
if( forceReset ):
self._mKeyPoints = None
self._mKPDescriptors = None
_detectors = ["SIFT", "SURF", "FAST", "STAR", "FREAK", "ORB", "BRISK", "MSER", "Dense"]
_descriptors = ["SIFT", "SURF", "ORB", "FREAK", "BRISK"]
if flavor not in _detectors:
warnings.warn("Invalid choice of keypoint detector.")
return (None, None)
if self._mKeyPoints != None and self._mKPFlavor == flavor:
return (self._mKeyPoints, self._mKPDescriptors)
if hasattr(cv2, flavor):
if flavor == "SURF":
# cv2.SURF(hessianThreshold, nOctaves, nOctaveLayers, extended, upright)
detector = cv2.SURF(thresh, 4, 2, highQuality, 1)
if new_version == 0:
self._mKeyPoints, self._mKPDescriptors = detector.detect(self.getGrayNumpy(), None, False)
else:
self._mKeyPoints, self._mKPDescriptors = detector.detectAndCompute(self.getGrayNumpy(), None, False)
if len(self._mKeyPoints) == 0:
return (None, None)
if highQuality == 1:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1, 128))
else:
self._mKPDescriptors = self._mKPDescriptors.reshape((-1, 64))
elif flavor in _descriptors:
detector = getattr(cv2, flavor)()
self._mKeyPoints, self._mKPDescriptors = detector.detectAndCompute(self.getGrayNumpy(), None, False)
elif flavor == "MSER":
if hasattr(cv2, "FeatureDetector_create"):
detector = cv2.FeatureDetector_create("MSER")
self._mKeyPoints = detector.detect(self.getGrayNumpy())
elif flavor == "STAR":
detector = cv2.StarDetector()
self._mKeyPoints = detector.detect(self.getGrayNumpy())
elif flavor == "FAST":
if not hasattr(cv2, "FastFeatureDetector"):
warnings.warn("You need OpenCV >= 2.4.0 to support FAST")
return None, None
detector = cv2.FastFeatureDetector(int(thresh), True)
self._mKeyPoints = detector.detect(self.getGrayNumpy(), None)
elif hasattr(cv2, "FeatureDetector_create"):
if flavor in _descriptors:
extractor = cv2.DescriptorExtractor_create(flavor)
if flavor == "FREAK":
if new_version == 0:
warnings.warn("You need OpenCV >= 2.4.3 to support FAST")
flavor = "SIFT"
detector = cv2.FeatureDetector_create(flavor)
self._mKeyPoints = detector.detect(self.getGrayNumpy())
self._mKeyPoints, self._mKPDescriptors = extractor.compute(self.getGrayNumpy(), self._mKeyPoints)
else:
detector = cv2.FeatureDetector_create(flavor)
self._mKeyPoints = detector.detect(self.getGrayNumpy())
else:
warnings.warn("SimpleCV can't seem to find appropriate function with your OpenCV version.")
return (None, None)
return (self._mKeyPoints, self._mKPDescriptors)
def _getFLANNMatches(self,sd,td):
"""
Summary:
This method does a fast local approximate nearest neighbors (FLANN) calculation between two sets
of feature vectors. The result are two numpy arrays the first one is a list of indexes of the
matches and the second one is the match distance value. For the match indices or idx, the index
values correspond to the values of td, and the value in the array is the index in td. I.
I.e. j = idx[i] is where td[i] matches sd[j].
The second numpy array, at the index i is the match distance between td[i] and sd[j].
Lower distances mean better matches.
Parameters:
sd - A numpy array of feature vectors of any size.
td - A numpy array of feature vectors of any size, this vector is used for indexing
and the result arrays will have a length matching this vector.
Returns:
Two numpy arrays, the first one, idx, is the idx of the matches of the vector td with sd.
The second one, dist, is the distance value for the closest match.
Example:
>>> kpt,td = img1._getRawKeypoints() # t is template
>>> kps,sd = img2._getRawKeypoints() # s is source
>>> idx,dist = img1._getFLANNMatches(sd,td)
>>> j = idx[42]
>>> print kps[j] # matches kp 42
>>> print dist[42] # the match quality.
Notes:
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
See:
ImageClass._getRawKeypoints(self,thresh=500.00,forceReset=False,flavor="SURF",highQuality=1)
ImageClass._getFLANNMatches(self,sd,td)
ImageClass.drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1)
ImageClass.findKeypoints(self,min_quality=300.00,flavor="SURF",highQuality=False )
ImageClass.findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4)
"""
try:
import cv2
except:
logger.warning("Can't run FLANN Matches without OpenCV >= 2.3.0")
return
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 4)
flann = cv2.flann_Index(sd, flann_params)
idx, dist = flann.knnSearch(td, 1, params = {}) # bug: need to provide empty dict
del flann
return idx,dist
def drawKeypointMatches(self,template,thresh=500.00,minDist=0.15,width=1):
"""
**SUMMARY**
Draw keypoints draws a side by side representation of two images, calculates
keypoints for both images, determines the keypoint correspondences, and then draws
the correspondences. This method is helpful for debugging keypoint calculations
and also looks really cool :). The parameters mirror the parameters used
for findKeypointMatch to assist with debugging.
**PARAMETERS**
* *template* - A template image.
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
* *width* - The width of the drawn line.
**RETURNS**
A side by side image of the template and source image with each feature correspondence
draw in a different color.
**EXAMPLE**
>>> img = cam.getImage()
>>> template = Image("myTemplate.png")
>>> result = img.drawKeypointMatches(template,300.00,0.4)
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
self._mKeyPoints # A tuple of keypoint objects
See: http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint
self._mKPDescriptors # The descriptor as a floating point numpy array
self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
if template == None:
return None
resultImg = template.sideBySide(self,scale=False)
hdif = (self.height-template.height)/2
skp,sd = self._getRawKeypoints(thresh)
tkp,td = template._getRawKeypoints(thresh)
if( td == None or sd == None ):
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry." )
return resultImg
template_points = float(td.shape[0])
sample_points = float(sd.shape[0])
magic_ratio = 1.00
if( sample_points > template_points ):
magic_ratio = float(sd.shape[0])/float(td.shape[0])
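# (the match distances get scaled by this keypoint-count ratio below,
# before the minDist cutoff is applied)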
idx,dist = self._getFLANNMatches(sd,td) # match our keypoint descriptors
p = dist[:,0]
result = p*magic_ratio < minDist #, = np.where( p*magic_ratio < minDist )
for i in range(0,len(idx)):
if( result[i] ):
pt_a = (tkp[i].pt[1], tkp[i].pt[0]+hdif)
pt_b = (skp[idx[i]].pt[1]+template.width,skp[idx[i]].pt[0])
resultImg.drawLine(pt_a,pt_b,color=Color.getRandom(),thickness=width)
return resultImg
def findKeypointMatch(self,template,quality=500.00,minDist=0.2,minMatch=0.4):
"""
**SUMMARY**
findKeypointMatch allows you to match a template image with another image using
SURF keypoints. The method extracts keypoints from each image, uses the Fast Local
Approximate Nearest Neighbors algorithm to find correspondences between the feature
points, filters the correspondences based on quality, and then, attempts to calculate
a homography between the two images. This homography allows us to draw a matching
bounding box in the source image that corresponds to the template. This method allows
you to perform matches that ordinarily fail when using the findTemplate method.
This method should be able to handle reasonable changes in camera orientation and
illumination. Using a template that is close to the target image will yield much
better results.
.. Warning::
This method is only capable of finding one instance of the template in an image.
If more than one instance is visible the homography calculation and the method will
fail.
**PARAMETERS**
* *template* - A template image.
* *quality* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
* *minMatch* - The percentage of features which must have matches to proceed with homography calculation.
A value of 0.4 means 40% of features must match. Higher values mean better matches
are used. Good values are between about 0.3 and 0.7
**RETURNS**
If a homography (match) is found this method returns a feature set with a single
KeypointMatch feature. If no match is found None is returned.
**EXAMPLE**
>>> template = Image("template.png")
>>> img = camera.getImage()
>>> fs = img.findKeypointMatch(template)
>>> if( fs is not None ):
>>> fs.draw()
>>> img.show()
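If matching fails on difficult scenes it can help to relax the thresholds; the values
below are illustrative starting points, not library recommendations:
>>> fs = img.findKeypointMatch(template, quality=400.00, minDist=0.25, minMatch=0.3)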
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
| self._mKeyPoints # A Tuple of keypoint objects
| self._mKPDescriptors # The descriptor as a floating point numpy array
| self._mKPFlavor = "NONE" # The flavor of the keypoints as a string.
| `See Documentation <http://opencv.itseez.com/modules/features2d/doc/common_interfaces_of_feature_detectors.html#keypoint-keypoint>`_
**SEE ALSO**
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except:
warnings.warn("Can't Match Keypoints without OpenCV >= 2.3.0")
return
if template == None:
return None
fs = FeatureSet()
skp,sd = self._getRawKeypoints(quality)
tkp,td = template._getRawKeypoints(quality)
if( skp == None or tkp == None ):
warnings.warn("I didn't get any keypoints. Image might be too uniform or blurry." )
return None
template_points = float(td.shape[0])
sample_points = float(sd.shape[0])
magic_ratio = 1.00
if( sample_points > template_points ):
magic_ratio = float(sd.shape[0])/float(td.shape[0])
idx,dist = self._getFLANNMatches(sd,td) # match our keypoint descriptors
p = dist[:,0]
result = p*magic_ratio < minDist #, = np.where( p*magic_ratio < minDist )
pr = result.shape[0]/float(dist.shape[0])
if( pr > minMatch and len(result)>4 ): # if more than minMatch % matches we go ahead and get the data
lhs = []
rhs = []
for i in range(0,len(idx)):
if( result[i] ):
lhs.append((tkp[i].pt[1], tkp[i].pt[0]))
rhs.append((skp[idx[i]].pt[0], skp[idx[i]].pt[1]))
rhs_pt = np.array(rhs)
lhs_pt = np.array(lhs)
if( len(rhs_pt) < 16 or len(lhs_pt) < 16 ):
return None
homography = []
(homography,mask) = cv2.findHomography(lhs_pt,rhs_pt,cv2.RANSAC, ransacReprojThreshold=1.0 )
w = template.width
h = template.height
pts = np.array([[0,0],[0,h],[w,h],[w,0]], dtype="float32")
pPts = cv2.perspectiveTransform(np.array([pts]), homography)
pt0i = (pPts[0][0][1], pPts[0][0][0])
pt1i = (pPts[0][1][1], pPts[0][1][0])
pt2i = (pPts[0][2][1], pPts[0][2][0])
pt3i = (pPts[0][3][1], pPts[0][3][0])
#construct the feature set and return it.
fs = FeatureSet()
fs.append(KeypointMatch(self,template,(pt0i,pt1i,pt2i,pt3i),homography))
#the homography matrix is necessary for many purposes like image stitching.
#fs.append(homography) # No need to add homography as it is already being
#added in KeyPointMatch class.
return fs
else:
return None
def findKeypoints(self,min_quality=300.00,flavor="SURF",highQuality=False ):
"""
**SUMMARY**
This method finds keypoints in an image and returns them as a feature set.
Keypoints are unique regions in an image that demonstrate some degree of
invariance to changes in camera pose and illumination. They are helpful
for calculating homographies between camera views, object rotations, and
multiple view overlaps.
We support a number of keypoint detectors, but not every flavor returns
descriptors; see the flavor list below and the notes on OpenCV versions.
**PARAMETERS**
* *min_quality* - The minimum quality metric for SURF descriptors. Good values
range between about 300.00 and 600.00
* *flavor* - a string indicating the method to use to extract features.
A good primer on how feature/keypoint extractors work can be found in
`feature detection on wikipedia <http://en.wikipedia.org/wiki/Feature_detection_(computer_vision)>`_
and
`this tutorial. <http://www.cg.tu-berlin.de/fileadmin/fg144/Courses/07WS/compPhoto/Feature_Detection.pdf>`_
* "SURF" - extract the SURF features and descriptors. If you don't know
what to use, use this.
See: http://en.wikipedia.org/wiki/SURF
* "STAR" - The STAR feature extraction algorithm
See: http://pr.willowgarage.com/wiki/Star_Detector
* "FAST" - The FAST keypoint extraction algorithm
See: http://en.wikipedia.org/wiki/Corner_detection#AST_based_feature_detectors
All the flavour specified below are for OpenCV versions >= 2.4.0 :
* "MSER" - Maximally Stable Extremal Regions algorithm
See: http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions
* "Dense" -
* "ORB" - The Oriented FAST and Rotated BRIEF
See: http://www.willowgarage.com/sites/default/files/orb_final.pdf
* "SIFT" - Scale-invariant feature transform
See: http://en.wikipedia.org/wiki/Scale-invariant_feature_transform
* "BRISK" - Binary Robust Invariant Scalable Keypoints
See: http://www.asl.ethz.ch/people/lestefan/personal/BRISK
* "FREAK" - Fast Retina Keypoints
See: http://www.ivpe.com/freak.htm
Note: It's a keypoint descriptor and not a KeyPoint detector. SIFT KeyPoints
are detected and FREAK is used to extract the keypoint descriptors.
* *highQuality* - The SURF descriptor comes in two forms, a vector of 64 descriptor
values and a vector of 128 descriptor values. The latter are "high"
quality descriptors.
**RETURNS**
A feature set of KeypointFeatures. These KeypointFeatures let you draw each
feature, crop the features, get the feature descriptors, etc.
**EXAMPLE**
>>> img = Image("aerospace.jpg")
>>> fs = img.findKeypoints(flavor="SURF",min_quality=500,highQuality=True)
>>> fs = fs.sortArea()
>>> fs[-1].draw()
>>> img.show()
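Other detectors can be selected by name; availability depends on your OpenCV build (illustrative):
>>> fs = img.findKeypoints(flavor="ORB")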
**NOTES**
If you would prefer to work with the raw keypoints and descriptors each image keeps
a local cache of the raw values. These are named:
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except:
logger.warning("Can't use Keypoints without OpenCV >= 2.3.0")
return None
fs = FeatureSet()
kp = []
d = []
if highQuality:
kp,d = self._getRawKeypoints(thresh=min_quality,forceReset=True,flavor=flavor,highQuality=1)
else:
kp,d = self._getRawKeypoints(thresh=min_quality,forceReset=True,flavor=flavor,highQuality=0)
if( flavor in ["ORB", "SIFT", "SURF", "BRISK", "FREAK"] and kp!=None and d !=None ):
for i in range(0,len(kp)):
fs.append(KeyPoint(self,kp[i],d[i],flavor))
elif(flavor in ["FAST", "STAR", "MSER", "Dense"] and kp!=None ):
for i in range(0,len(kp)):
fs.append(KeyPoint(self,kp[i],None,flavor))
else:
logger.warning("ImageClass.Keypoints: I don't know the method you want to use")
return None
return fs
def findMotion(self, previous_frame, window=11, method='BM', aggregate=True):
"""
**SUMMARY**
findMotion performs an optical flow calculation. This method attempts to find
motion between two subsequent frames of an image. You provide it
with the previous frame image and it returns a feature set of motion
features that are vectors in the direction of motion.
**PARAMETERS**
* *previous_frame* - The last frame as an Image.
* *window* - The block size for the algorithm. For the HS and LK methods
this is the regular sample grid at which we return motion samples.
For the block matching method this is the matching window size.
* *method* - The algorithm to use as a string.
Your choices are:
* 'BM' - default block matching robust but slow - if you are unsure use this.
* 'LK' - `Lucas-Kanade method <http://en.wikipedia.org/wiki/Lucas%E2%80%93Kanade_method>`_
* 'HS' - `Horn-Schunck method <http://en.wikipedia.org/wiki/Horn%E2%80%93Schunck_method>`_
* *aggregate* - If aggregate is true, each of our motion features is the average of
motion around the sample grid defined by window. If aggregate is false
we just return the value as sampled at the window grid interval. For
block matching this flag is ignored.
**RETURNS**
A featureset of motion objects.
**EXAMPLES**
>>> cam = Camera()
>>> img1 = cam.getImage()
>>> img2 = cam.getImage()
>>> motion = img2.findMotion(img1)
>>> motion.draw()
>>> img2.show()
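The optical flow method and sample window can also be chosen explicitly (illustrative):
>>> motion = img2.findMotion(img1, window=15, method="LK")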
**SEE ALSO**
:py:class:`Motion`
:py:class:`FeatureSet`
"""
try:
import cv2
ver = cv2.__version__
#For OpenCV versions up to 2.4.0, cv2.__version__ is of the form "$Rev: 4557 $"
if not ver.startswith('$Rev:') :
if int(ver.replace('.','0'))>=20400 :
FLAG_VER = 1
if (window > 9):
window = 9
else :
FLAG_VER = 0
except :
FLAG_VER = 0
if( self.width != previous_frame.width or self.height != previous_frame.height):
logger.warning("ImageClass.getMotion: To find motion the current and previous frames must match")
return None
fs = FeatureSet()
max_mag = 0.00
if( method == "LK" or method == "HS" ):
# create the result images.
xf = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
yf = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_32F, 1)
win = (window,window)
if( method == "LK" ):
cv.CalcOpticalFlowLK(self._getGrayscaleBitmap(),previous_frame._getGrayscaleBitmap(),win,xf,yf)
else:
cv.CalcOpticalFlowHS(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),0,xf,yf,1.0,(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 10, 0.01))
w = math.floor((float(window))/2.0)
cx = ((self.width-window)/window)+1 #our sample rate
cy = ((self.height-window)/window)+1
vx = 0.00
vy = 0.00
for x in range(0,int(cx)): # go through our sample grid
for y in range(0,int(cy)):
xi = (x*window)+w # calculate the sample point
yi = (y*window)+w
if( aggregate ):
lowx = int(xi-w)
highx = int(xi+w)
lowy = int(yi-w)
highy = int(yi+w)
xderp = xf[lowy:highy,lowx:highx] # get the average x/y components in the output
yderp = yf[lowy:highy,lowx:highx]
vx = np.average(xderp)
vy = np.average(yderp)
else: # otherwise just sample at the grid point
vx = xf[yi,xi]
vy = yf[yi,xi]
mag = (vx*vx)+(vy*vy)
if(mag > max_mag): # calculate the max magnitude for normalizing our vectors
max_mag = mag
fs.append(Motion(self,xi,yi,vx,vy,window)) # add the sample to the feature set
elif( method == "BM"):
# In the interest of keeping the parameter list short
# I am pegging these to the window size.
# For versions with OpenCV 2.4.0 and below.
if ( FLAG_VER==0):
block = (window,window) # block size
shift = (int(window*1.2),int(window*1.2)) # how far to shift the block
spread = (window*2,window*2) # the search windows.
wv = (self.width - block[0]) / shift[0] # the result image size
hv = (self.height - block[1]) / shift[1]
xf = cv.CreateMat(hv, wv, cv.CV_32FC1)
yf = cv.CreateMat(hv, wv, cv.CV_32FC1)
cv.CalcOpticalFlowBM(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),block,shift,spread,0,xf,yf)
#For versions with OpenCV 2.4.0 and above.
elif ( FLAG_VER==1) :
block = (window,window) # block size
shift = (int(window*0.2),int(window*0.2)) # how far to shift the block
spread = (window,window) # the search windows.
wv = self.width-block[0]+shift[0]
hv = self.height-block[1]+shift[1]
xf = cv.CreateImage((wv,hv), cv.IPL_DEPTH_32F, 1)
yf = cv.CreateImage((wv,hv), cv.IPL_DEPTH_32F, 1)
cv.CalcOpticalFlowBM(previous_frame._getGrayscaleBitmap(),self._getGrayscaleBitmap(),block,shift,spread,0,xf,yf)
for x in range(0,int(wv)): # go through the sample grid
for y in range(0,int(hv)):
xi = (shift[0]*(x))+block[0] #where on the input image the samples live
yi = (shift[1]*(y))+block[1]
vx = xf[y,x] # the result image values
vy = yf[y,x]
fs.append(Motion(self,xi,yi,vx,vy,window)) # add the feature
mag = (vx*vx)+(vy*vy) # save the squared magnitude
if(mag > max_mag):
max_mag = mag
else:
logger.warning("ImageClass.findMotion: I don't know what algorithm you want to use. Valid method choices are Block Matching -> \"BM\" Horn-Schunck -> \"HS\" and Lucas-Kanade->\"LK\" ")
return None
max_mag = math.sqrt(max_mag) # do the normalization
for f in fs:
f.normalizeTo(max_mag)
return fs
def _generatePalette(self,bins,hue, centroids = None):
"""
**SUMMARY**
This is the main entry point for palette generation. A palette, for our purposes,
is a list of the main colors in an image. Creating a palette with 10 bins, tries
to cluster the colors in rgb space into ten distinct groups. In hue space we only
look at the hue channel. All of the relevant palette data is cached in the image
class.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *centroids* - A list of tuples that are the initial k-means estimates. This is handy if you want consistent results from palettize.
**RETURNS**
Nothing, but creates the image's cached values for:
self._mDoHuePalette
self._mPaletteBins
self._mPalette
self._mPaletteMembers
self._mPalettePercentages
**EXAMPLE**
>>> img._generatePalette(bins=42)
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
it works in a one dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
UserWarning: One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
ImageClass.getPalette(self,bins=10,hue=False)
ImageClass.rePalette(self,palette,hue=False):
ImageClass.drawPaletteColors(self,size=(-1,-1),horizontal=True,bins=10,hue=False)
ImageClass.palettize(self,bins=10,hue=False)
ImageClass.binarizeFromPalette(self, palette_selection)
ImageClass.findBlobsFromPalette(self, palette_selection, dilate = 0, minsize=5, maxsize=0)
"""
if( self._mPaletteBins != bins or
self._mDoHuePalette != hue ):
total = float(self.width*self.height)
percentages = []
result = None
if( not hue ):
pixels = np.array(self.getNumpy()).reshape(-1, 3) #reshape our matrix to Nx3
if( centroids == None ):
result = scv.kmeans(pixels,bins)
else:
if(isinstance(centroids,list)):
centroids = np.array(centroids,dtype='uint8')
result = scv.kmeans(pixels,centroids)
self._mPaletteMembers = scv.vq(pixels,result[0])[0]
else:
hsv = self
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
h = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),None,None,h,None)
mat = cv.GetMat(h)
pixels = np.array(mat).reshape(-1,1)
if( centroids == None ):
result = scv.kmeans(pixels,bins)
else:
if(isinstance( centroids,list)):
centroids = np.array( centroids,dtype='uint8')
centroids = centroids.reshape(centroids.shape[0],1)
result = scv.kmeans(pixels,centroids)
self._mPaletteMembers = scv.vq(pixels,result[0])[0]
for i in range(0,bins):
count = np.where(self._mPaletteMembers==i)
v = float(count[0].shape[0])/total
percentages.append(v)
self._mDoHuePalette = hue
self._mPaletteBins = bins
self._mPalette = np.array(result[0],dtype='uint8')
self._mPalettePercentages = percentages
def getPalette(self,bins=10,hue=False,centroids=None):
"""
**SUMMARY**
This method returns the colors in the palette of the image. A palette is the
set of the most common colors in an image. This method is helpful for segmentation.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *centroids* - A list of tuples that are the initial k-means estimates. This is handy if you want consistent results from palettize.
**RETURNS**
A numpy array of the BGR color tuples.
**EXAMPLE**
>>> p = img.getPalette(bins=42)
>>> print p[2]
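A hue-only palette can be requested as well (illustrative):
>>> hp = img.getPalette(bins=16, hue=True)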
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
it works in a one dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
self._generatePalette(bins,hue,centroids)
return self._mPalette
def rePalette(self,palette,hue=False):
"""
**SUMMARY**
rePalette takes in the palette from another image and attempts to apply it to this image.
This is helpful if you want to speed up the palette computation for a series of images (like those in a
video stream).
**PARAMETERS**
* *palette* - The pre-computed palette from another image.
* *hue* - Boolean Hue - if hue is True we use a hue palette, otherwise we use a BGR palette.
**RETURNS**
A SimpleCV Image.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = Image("logo")
>>> p = img.getPalette()
>>> result = img2.rePalette(p)
>>> result.show()
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
retVal = None
if(hue):
hsv = self
if( self._colorSpace != ColorSpace.HSV ):
hsv = self.toHSV()
h = hsv.getEmpty(1)
cv.Split(hsv.getBitmap(),None,None,h,None)
mat = cv.GetMat(h)
pixels = np.array(mat).reshape(-1,1)
result = scv.vq(pixels,palette)
derp = palette[result[0]]
retVal = Image(derp[::-1].reshape(self.height,self.width)[::-1])
retVal = retVal.rotate(-90,fixed=False)
retVal._mDoHuePalette = True
retVal._mPaletteBins = len(palette)
retVal._mPalette = palette
retVal._mPaletteMembers = result[0]
else:
result = scv.vq(self.getNumpy().reshape(-1,3),palette)
retVal = Image(palette[result[0]].reshape(self.width,self.height,3))
retVal._mDoHuePalette = False
retVal._mPaletteBins = len(palette)
retVal._mPalette = palette
pixels = np.array(self.getNumpy()).reshape(-1, 3)
retVal._mPaletteMembers = scv.vq(pixels,palette)[0]
percentages = []
total = self.width*self.height
for i in range(0,len(palette)):
count = np.where(self._mPaletteMembers==i)
v = float(count[0].shape[0])/total
percentages.append(v)
self._mPalettePercentages = percentages
return retVal
def drawPaletteColors(self,size=(-1,-1),horizontal=True,bins=10,hue=False):
"""
**SUMMARY**
This method returns the visual representation (swatches) of the palette in an image. The palette
is oriented either horizontally or vertically, and each color is given an area
proportional to the number of pixels that have that color in the image. The palette
is arranged as it is returned from the clustering algorithm. When size is left
at its default value, the palette will match the image along the chosen
orientation and be 10% of the other dimension. E.g. if our image is 640x480 the horizontal
palette will be (640x48); likewise the vertical palette will be (64x480).
If a Hue palette is used this method will return a grayscale palette.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
* *size* - The size of the generated palette as a (width,height) tuple, if left default we select
a size based on the image so it can be nicely displayed with the
image.
* *horizontal* - If true we orient our palette horizontally, otherwise vertically.
**RETURNS**
A palette swatch image.
**EXAMPLE**
>>> p = img1.drawPaletteColors()
>>> img2 = img1.sideBySide(p,side="bottom")
>>> img2.show()
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
it works in a one dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
self._generatePalette(bins,hue)
retVal = None
if( not hue ):
if( horizontal ):
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width),int(self.height*.1))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+(self._mPalettePercentages[i]*float(size[0])),0,size[0]-1)
roi = (int(idxL),0,int(idxH-idxL),size[1])
cv.SetImageROI(pal,roi)
color = np.array((float(self._mPalette[i][2]),float(self._mPalette[i][1]),float(self._mPalette[i][0])))
cv.AddS(pal,color,pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else:
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width*.1),int(self.height))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 3)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+self._mPalettePercentages[i]*size[1],0,size[1]-1)
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
color = np.array((float(self._mPalette[i][2]),float(self._mPalette[i][1]),float(self._mPalette[i][0])))
cv.AddS(pal,color,pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else: # do hue
if( horizontal ):
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width),int(self.height*.1))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+(self._mPalettePercentages[i]*float(size[0])),0,size[0]-1)
roi = (int(idxL),0,int(idxH-idxL),size[1])
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
else:
if( size[0] == -1 or size[1] == -1 ):
size = (int(self.width*.1),int(self.height))
pal = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
cv.Zero(pal)
idxL = 0
idxH = 0
for i in range(0,bins):
idxH =np.clip(idxH+self._mPalettePercentages[i]*size[1],0,size[1]-1)
roi = (0,int(idxL),size[0],int(idxH-idxL))
cv.SetImageROI(pal,roi)
cv.AddS(pal,float(self._mPalette[i]),pal)
cv.ResetImageROI(pal)
idxL = idxH
retVal = Image(pal)
return retVal
def palettize(self,bins=10,hue=False,centroids=None):
"""
**SUMMARY**
This method analyzes an image and determines the most common colors using a k-means algorithm.
The method then goes through and replaces each pixel with the centroid of the clusters found
by k-means. This reduces the number of colors in an image to the number of bins. This can be particularly
handy for doing segmentation based on color.
**PARAMETERS**
* *bins* - an integer number of bins into which to divide the colors in the image.
* *hue* - if hue is true we only cluster on the image hue values.
**RETURNS**
An image matching the original where each color is replaced with its palette value.
**EXAMPLE**
>>> img2 = img1.palettize()
>>> img2.show()
**NOTES**
The hue calculations should be significantly faster than the generic RGB calculation as
it works in a one dimensional space. Sometimes the underlying scipy method freaks out
about k-means initialization with the following warning:
.. Warning::
UserWarning: One of the clusters is empty. Re-run kmean with a different initialization.
This shouldn't be a real problem.
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
retVal = None
self._generatePalette(bins,hue,centroids)
if( hue ):
derp = self._mPalette[self._mPaletteMembers]
retVal = Image(derp[::-1].reshape(self.height,self.width)[::-1])
retVal = retVal.rotate(-90,fixed=False)
else:
retVal = Image(self._mPalette[self._mPaletteMembers].reshape(self.width,self.height,3))
return retVal
def findBlobsFromPalette(self, palette_selection, dilate = 0, minsize=5, maxsize=0,appx_level=3):
"""
**SUMMARY**
This method attempts to use palettization to do segmentation and behaves similarly to
findBlobs in that it returns a FeatureSet of blob objects. Once a palette has been
extracted using getPalette() we can then select colors from that palette to be labeled
white within our blobs.
**PARAMETERS**
* *palette_selection* - color triplets selected from our palette that will be turned into blobs.
These values can either be a 3xN numpy array, or a list of RGB triplets.
* *dilate* - the optional number of dilation operations to perform on the binary image
prior to performing blob extraction.
* *minsize* - the minimum blob size in pixels
* *maxsize* - the maximum blob size in pixels.
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
If the method executes successfully a FeatureSet of Blobs is returned from the image. If the method
fails a value of None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> blobs = img.findBlobsFromPalette( (p[0],p[1],p[6]) )
>>> blobs.draw()
>>> img.show()
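A sketch using the optional parameters, reusing the palette p from the example above (the palette indices, dilation count, and minimum size are arbitrary choices):
>>> blobs = img.findBlobsFromPalette( (p[0],p[1]), dilate=1, minsize=20 )
>>> if blobs is not None:
>>> blobs.draw()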
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
# we get the palette from getPalette()
# NOTE: assumes getPalette() was already called
bwimg = self.binarizeFromPalette(palette_selection)
if( dilate > 0 ):
bwimg =bwimg.dilate(dilate)
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
blobmaker = BlobMaker()
blobs = blobmaker.extractFromBinary(bwimg,
self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return blobs
def binarizeFromPalette(self, palette_selection):
"""
**SUMMARY**
This method uses the color palette to generate a binary (black and white) image. Palette selection
is a list of color tuples retrieved from img.getPalette(). The provided values will be drawn white
while other values will be black.
**PARAMETERS**
palette_selection - color triplets selected from our palette that will be turned into blobs.
These values can either be a 3xN numpy array, or a list of RGB triplets.
**RETURNS**
This method returns a black and white image, where colors that are close to the colors
in palette_selection are set to white
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.getPalette()
>>> b = img.binarizeFromPalette( (p[0],p[1],p[6]) )
>>> b.show()
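A sketch that picks two palette entries by index (the indices are arbitrary; any subset of the palette returned by getPalette() works):
>>> sel = (p[0],p[2])
>>> img.binarizeFromPalette(sel).show()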
**SEE ALSO**
:py:meth:`rePalette`
:py:meth:`drawPaletteColors`
:py:meth:`palettize`
:py:meth:`getPalette`
:py:meth:`binarizeFromPalette`
:py:meth:`findBlobsFromPalette`
"""
# we get the palette from getPalette()
# NOTE: assumes getPalette() was already called
if( self._mPalette is None ):
logger.warning("Image.binarizeFromPalette: No palette exists, call getPalette())")
return None
retVal = None
img = self.palettize(self._mPaletteBins, hue=self._mDoHuePalette)
if( not self._mDoHuePalette ):
npimg = img.getNumpy()
white = np.array([255,255,255])
black = np.array([0,0,0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
else:
npimg = img.getNumpy()[:,:,1]
white = np.array([255])
black = np.array([0])
for p in palette_selection:
npimg = np.where(npimg != p,npimg,white)
npimg = np.where(npimg != white,black,white)
retVal = Image(npimg)
return retVal
def skeletonize(self, radius = 5):
"""
**SUMMARY**
Skeletonization is the process of taking in a set of blobs (here blobs are white
on a black background) and finding a squiggly line that would be the backbone of
the blobs were they some sort of vertebrate animal. Another way of thinking about
skeletonization is that it finds a series of lines that approximates a blob's shape.
A good summary can be found here:
http://www.inf.u-szeged.hu/~palagyi/skel/skel.html
**PARAMETERS**
* *radius* - an integer that defines roughly how wide a blob must be to be added
to the skeleton, lower values give more skeleton lines, higher values give
fewer skeleton lines.
**EXAMPLE**
>>> cam = Camera()
>>> while True:
>>> img = cam.getImage()
>>> b = img.binarize().invert()
>>> s = img.skeletonize()
>>> r = b-s
>>> r.show()
**NOTES**
This code was a suggested improvement by Alex Wiltchko, check out his awesome blog here:
http://alexbw.posterous.com/
"""
img = self.toGray().getNumpy()[:,:,0]
distance_img = ndimage.distance_transform_edt(img)
morph_laplace_img = ndimage.morphological_laplace(distance_img, (radius, radius))
skeleton = morph_laplace_img < morph_laplace_img.min()/2
retVal = np.zeros([self.width,self.height])
retVal[skeleton] = 255
return Image(retVal)
def smartThreshold(self, mask=None, rect=None):
"""
**SUMMARY**
smartThreshold uses a method called grabCut, also called graph cut, to
automagically generate a grayscale mask image. The dumb version of threshold
just uses color, smartThreshold looks at
both color and edges to find a blob. To work smartThreshold needs either a
rectangle that bounds the object you want to find, or a mask. If you use
a rectangle make sure it holds the complete object. In the case of a mask, it
need not be a normal binary mask, it can have the normal white foreground and black
background, but also a light and dark gray values that correspond to areas
that are more likely to be foreground and more likely to be background. These
values can be found in the color class as Color.BACKGROUND, Color.FOREGROUND,
Color.MAYBE_BACKGROUND, and Color.MAYBE_FOREGROUND.
**PARAMETERS**
* *mask* - A grayscale mask the same size as the image using the 4 mask color values
* *rect* - A rectangle tuple of the form (x_position,y_position,width,height)
**RETURNS**
A grayscale image with the foreground / background values assigned to:
* BACKGROUND = (0,0,0)
* MAYBE_BACKGROUND = (64,64,64)
* MAYBE_FOREGROUND = (192,192,192)
* FOREGROUND = (255,255,255)
**EXAMPLE**
>>> img = Image("RatTop.png")
>>> mask = Image((img.width,img.height))
>>> mask.dl().circle((100,100),80,color=Color.MAYBE_BACKGROUND,filled=True)
>>> mask.dl().circle((100,100),60,color=Color.MAYBE_FOREGROUND,filled=True)
>>> mask.dl().circle((100,100),40,color=Color.FOREGROUND,filled=True)
>>> mask = mask.applyLayers()
>>> new_mask = img.smartThreshold(mask=mask)
>>> new_mask.show()
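A minimal sketch of the rectangle form (the rectangle coordinates are made up and should bound the object of interest):
>>> rect_mask = img.smartThreshold(rect=(50,50,100,100))
>>> rect_mask.show()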
**NOTES**
http://en.wikipedia.org/wiki/Graph_cuts_in_computer_vision
**SEE ALSO**
:py:meth:`smartFindBlobs`
"""
try:
import cv2
except:
logger.warning("Can't Do GrabCut without OpenCV >= 2.3.0")
return
retVal = []
if( mask is not None ):
bmp = mask._getGrayscaleBitmap()
# translate the human readable images to something opencv wants using a lut
LUT = np.zeros((256,1),dtype=np.uint8)
LUT[255]=1
LUT[64]=2
LUT[192]=3
cv.LUT(bmp,bmp,cv.fromarray(LUT))
mask_in = np.array(cv.GetMat(bmp))
# get our image in a flavor grab cut likes
npimg = np.array(cv.GetMat(self.getBitmap()))
# required by opencv
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
# do the algorithm
cv2.grabCut(npimg,mask_in,None,tmp1,tmp2,10,mode=cv2.GC_INIT_WITH_MASK)
# generate the output image
output = cv.CreateImageHeader((mask_in.shape[1],mask_in.shape[0]),cv.IPL_DEPTH_8U,1)
cv.SetData(output,mask_in.tostring(),mask_in.dtype.itemsize*mask_in.shape[1])
# remap the color space
LUT = np.zeros((256,1),dtype=np.uint8)
LUT[1]=255
LUT[2]=64
LUT[3]=192
cv.LUT(output,output,cv.fromarray(LUT))
# and create the return value
mask._graybitmap = None # don't ask me why... but this gets corrupted
retVal = Image(output)
elif ( rect is not None ):
npimg = np.array(cv.GetMat(self.getBitmap()))
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
mask = np.zeros((self.height,self.width),dtype='uint8')
cv2.grabCut(npimg,mask,rect,tmp1,tmp2,10,mode=cv2.GC_INIT_WITH_RECT)
bmp = cv.CreateImageHeader((mask.shape[1],mask.shape[0]),cv.IPL_DEPTH_8U,1)
cv.SetData(bmp,mask.tostring(),mask.dtype.itemsize*mask.shape[1])
LUT = np.zeros((256,1),dtype=np.uint8)
LUT[1]=255
LUT[2]=64
LUT[3]=192
cv.LUT(bmp,bmp,cv.fromarray(LUT))
retVal = Image(bmp)
else:
logger.warning( "ImageClass.findBlobsSmart requires either a mask or a selection rectangle. Failure to provide one of these causes your bytes to splinter and bit shrapnel to hit your pipeline making it asplode in a ball of fire. Okay... not really")
return retVal
def smartFindBlobs(self,mask=None,rect=None,thresh_level=2,appx_level=3):
"""
**SUMMARY**
smartFindBlobs uses a method called grabCut, also called graph cut, to
automagically determine the boundary of a blob in the image. The dumb find
blobs just uses color threshold to find the boundary, smartFindBlobs looks at
both color and edges to find a blob. To work smartFindBlobs needs either a
rectangle that bounds the object you want to find, or a mask. If you use
a rectangle make sure it holds the complete object. In the case of a mask, it
need not be a normal binary mask, it can have the normal white foreground and black
background, but also a light and dark gray values that correspond to areas
that are more likely to be foreground and more likely to be background. These
values can be found in the color class as Color.BACKGROUND, Color.FOREGROUND,
Color.MAYBE_BACKGROUND, and Color.MAYBE_FOREGROUND.
**PARAMETERS**
* *mask* - A grayscale mask the same size as the image using the 4 mask color values
* *rect* - A rectangle tuple of the form (x_position,y_position,width,height)
* *thresh_level* - This represents what grab cut values to use in the mask after the
graph cut algorithm is run,
* 1 - means use the foreground, maybe_foreground, and maybe_background values
* 2 - means use the foreground and maybe_foreground values.
* 3+ - means use just the foreground
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
A featureset of blobs. If everything went smoothly only a couple of blobs should
be present.
**EXAMPLE**
>>> img = Image("RatTop.png")
>>> mask = Image((img.width,img.height))
>>> mask.dl().circle((100,100),80,color=Color.MAYBE_BACKGROUND,filled=True)
>>> mask.dl().circle((100,100),60,color=Color.MAYBE_FOREGROUND,filled=True)
>>> mask.dl().circle((100,100),40,color=Color.FOREGROUND,filled=True)
>>> mask = mask.applyLayers()
>>> blobs = img.smartFindBlobs(mask=mask)
>>> blobs.draw()
>>> blobs.show()
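A sketch using a bounding rectangle and a stricter threshold level (the coordinates are illustrative only):
>>> blobs = img.smartFindBlobs(rect=(50,50,100,100),thresh_level=3)
>>> if blobs is not None:
>>> blobs.draw()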
**NOTES**
http://en.wikipedia.org/wiki/Graph_cuts_in_computer_vision
**SEE ALSO**
:py:meth:`smartThreshold`
"""
result = self.smartThreshold(mask, rect)
binary = None
retVal = None
if result:
if( thresh_level == 1 ):
result = result.threshold(192)
elif( thresh_level == 2):
result = result.threshold(128)
elif( thresh_level > 2 ):
result = result.threshold(1)
bm = BlobMaker()
retVal = bm.extractFromBinary(result,self,appx_level=appx_level)
return retVal
def threshold(self, value):
"""
**SUMMARY**
We roll old school with this vanilla threshold function. It takes your image,
converts it to grayscale, and applies a threshold. Values above the threshold
are white, values below the threshold are black (note this is in contrast to
binarize... which is a stupid function that drives me up a wall). The resulting
black and white image is returned.
**PARAMETERS**
* *value* - the threshold, goes between 0 and 255.
**RETURNS**
A black and white SimpleCV image.
**EXAMPLE**
>>> img = Image("purplemonkeydishwasher.png")
>>> result = img.threshold(42)
**NOTES**
THRESHOLD RULES BINARIZE DROOLS!
**SEE ALSO**
:py:meth:`binarize`
"""
gray = self._getGrayscaleBitmap()
result = self.getEmpty(1)
cv.Threshold(gray, result, value, 255, cv.CV_THRESH_BINARY)
retVal = Image(result)
return retVal
def floodFill(self,points,tolerance=None,color=Color.WHITE,lower=None,upper=None,fixed_range=True):
"""
**SUMMARY**
FloodFill works just like ye olde paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill
* *tolerance* - The color tolerance as a single value or a triplet.
* *color* - The color to replace the floodFill pixels with
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
**RETURNS**
An Image where the values similar to the seed pixel have been replaced by the input color.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.floodFill(((10,10),(54,32)),tolerance=(10,10,10),color=Color.RED)
>>> img2.show()
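A sketch of the lower/upper form, which overrides the tolerance (the bounds and seed point are arbitrary):
>>> img3 = img.floodFill((10,10),lower=(5,5,5),upper=(30,30,30),color=Color.RED)
>>> img3.show()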
**SEE ALSO**
:py:meth:`floodFillToMask`
:py:meth:`findFloodFillBlobs`
"""
if( isinstance(color,np.ndarray) ):
color = color.tolist()
elif( isinstance(color,dict) ):
color = (color['R'],color['G'],color['B'])
if( isinstance(points,tuple) ):
points = np.array(points)
# first we guess what the user wants to do
# if we get and int/float convert it to a tuple
if( upper is None and lower is None and tolerance is None ):
upper = (0,0,0)
lower = (0,0,0)
if( tolerance is not None and
(isinstance(tolerance,float) or isinstance(tolerance,int))):
tolerance = (int(tolerance),int(tolerance),int(tolerance))
if( lower is not None and
(isinstance(lower,float) or isinstance(lower, int)) ):
lower = (int(lower),int(lower),int(lower))
elif( lower is None ):
lower = tolerance
if( upper is not None and
(isinstance(upper,float) or isinstance(upper, int)) ):
upper = (int(upper),int(upper),int(upper))
elif( upper is None ):
upper = tolerance
if( isinstance(points,tuple) ):
points = np.array(points)
flags = 8
if( fixed_range ):
flags = flags+cv.CV_FLOODFILL_FIXED_RANGE
bmp = self.getEmpty()
cv.Copy(self.getBitmap(),bmp)
if( len(points.shape) != 1 ):
for p in points:
cv.FloodFill(bmp,tuple(p),color,lower,upper,flags)
else:
cv.FloodFill(bmp,tuple(points),color,lower,upper,flags)
retVal = Image(bmp)
return retVal
def floodFillToMask(self, points,tolerance=None,color=Color.WHITE,lower=None,upper=None,fixed_range=True,mask=None):
"""
**SUMMARY**
floodFillToMask works sort of like the paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
Unlike regular floodFill, floodFillToMask will return a binary mask of your flood fill
operation. This is handy if you want to extract blobs from an area, or create a
selection from a region. The method takes in an optional mask. Non-zero values of the mask
act to block the flood fill operations. This is handy if you want to use an edge image
to "stop" the flood fill operation within a particular region.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill
* *tolerance* - The color tolerance as a single value or a triplet.
* *color* - The color to replace the floodFill pixels with
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
* *mask* - An optional mask image that can be used to control the flood fill operation.
The output of this method will include the data from the input mask.
**RETURNS**
An Image where the values similar to the seed pixel have been replaced by the input color.
**EXAMPLE**
>>> img = Image("lenna")
>>> mask = img.edges()
>>> mask= img.floodFillToMask(((10,10),(54,32)),tolerance=(10,10,10),mask=mask)
>>> mask.show()
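A sketch without a blocking mask, using explicit lower/upper bounds (the seed point and bounds are illustrative):
>>> m = img.floodFillToMask((10,10),lower=(5,5,5),upper=(30,30,30))
>>> m.show()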
**SEE ALSO**
:py:meth:`floodFill`
:py:meth:`findFloodFillBlobs`
"""
mask_flag = 255 # flag weirdness
if( isinstance(color,np.ndarray) ):
color = color.tolist()
elif( isinstance(color,dict) ):
color = (color['R'],color['G'],color['B'])
if( isinstance(points,tuple) ):
points = np.array(points)
# first we guess what the user wants to do
# if we get and int/float convert it to a tuple
if( upper is None and lower is None and tolerance is None ):
upper = (0,0,0)
lower = (0,0,0)
if( tolerance is not None and
(isinstance(tolerance,float) or isinstance(tolerance,int))):
tolerance = (int(tolerance),int(tolerance),int(tolerance))
if( lower is not None and
(isinstance(lower,float) or isinstance(lower, int)) ):
lower = (int(lower),int(lower),int(lower))
elif( lower is None ):
lower = tolerance
if( upper is not None and
(isinstance(upper,float) or isinstance(upper, int)) ):
upper = (int(upper),int(upper),int(upper))
elif( upper is None ):
upper = tolerance
if( isinstance(points,tuple) ):
points = np.array(points)
flags = (mask_flag << 8 )+8
if( fixed_range ):
flags = flags + cv.CV_FLOODFILL_FIXED_RANGE
localMask = None
#opencv wants a mask that is slightly larger
if( mask is None ):
localMask = cv.CreateImage((self.width+2,self.height+2), cv.IPL_DEPTH_8U, 1)
cv.Zero(localMask)
else:
localMask = mask.embiggen(size=(self.width+2,self.height+2))._getGrayscaleBitmap()
bmp = self.getEmpty()
cv.Copy(self.getBitmap(),bmp)
if( len(points.shape) != 1 ):
for p in points:
cv.FloodFill(bmp,tuple(p),color,lower,upper,flags,localMask)
else:
cv.FloodFill(bmp,tuple(points),color,lower,upper,flags,localMask)
retVal = Image(localMask)
retVal = retVal.crop(1,1,self.width,self.height)
return retVal
def findBlobsFromMask(self, mask,threshold=128, minsize=10, maxsize=0,appx_level=3 ):
"""
**SUMMARY**
This method acts like findBlobs, but it lets you specify blobs directly by
providing a mask image. The mask image must match the size of this image, and
the mask should have values > threshold where you want the blobs selected. This
method can be used with binarize, dilate, erode, floodFill, edges, etc. to
get really nice segmentation.
**PARAMETERS**
* *mask* - The mask image, areas lighter than threshold will be counted as blobs.
Mask should be the same size as this image.
* *threshold* - A single threshold value used when we binarize the mask.
* *minsize* - The minimum size of the returned blobs.
* *maxsize* - The maximum size of the returned blobs, if none is specified we peg
this to the image size.
* *appx_level* - The blob approximation level - an integer for the maximum distance between the true edge and the
approximation edge - lower numbers yield better approximation.
**RETURNS**
A featureset of blobs. If no blobs are found None is returned.
**EXAMPLE**
>>> img = Image("Foo.png")
>>> mask = img.binarize().dilate(2)
>>> blobs = img.findBlobsFromMask(mask)
>>> blobs.show()
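A sketch that builds the mask from a flood fill instead (the seed point, tolerance, and minimum size are arbitrary):
>>> mask = img.floodFillToMask((10,10),tolerance=30)
>>> blobs = img.findBlobsFromMask(mask,threshold=128,minsize=20)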
**SEE ALSO**
:py:meth:`findBlobs`
:py:meth:`binarize`
:py:meth:`threshold`
:py:meth:`dilate`
:py:meth:`erode`
"""
if (maxsize == 0):
maxsize = self.width * self.height
#create a single channel image, thresholded to parameters
if( mask.width != self.width or mask.height != self.height ):
logger.warning("ImageClass.findBlobsFromMask - your mask does not match the size of your image")
return None
blobmaker = BlobMaker()
gray = mask._getGrayscaleBitmap()
result = mask.getEmpty(1)
cv.Threshold(gray, result, threshold, 255, cv.CV_THRESH_BINARY)
blobs = blobmaker.extractFromBinary(Image(result), self, minsize = minsize, maxsize = maxsize,appx_level=appx_level)
if not len(blobs):
return None
return FeatureSet(blobs).sortArea()
def findFloodFillBlobs(self,points,tolerance=None,lower=None,upper=None,
fixed_range=True,minsize=30,maxsize=-1):
"""
**SUMMARY**
This method lets you use a flood fill operation and pipe the results to findBlobs. You provide
the points to seed floodFill and the rest is taken care of.
floodFill works just like ye olde paint bucket tool in your favorite image manipulation
program. You select a point (or a list of points), a color, and a tolerance, and floodFill will start at that
point, looking for pixels within the tolerance from your initial pixel. If the pixel is in
tolerance, we will convert it to your color, otherwise the method will leave the pixel alone.
The method accepts both single values, and triplet tuples for the tolerance values. If you
require more control over your tolerance you can use the upper and lower values. The fixed
range parameter lets you toggle between setting the tolerance with respect to the seed pixel,
and using a tolerance that is relative to the adjacent pixels. If fixed_range is true the
method will set its tolerance with respect to the seed pixel, otherwise the tolerance will
be with respect to adjacent pixels.
**PARAMETERS**
* *points* - A tuple, list of tuples, or np.array of seed points for flood fill.
* *tolerance* - The color tolerance as a single value or a triplet.
* *lower* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *upper* - If tolerance does not provide enough control you can optionally set the upper and lower values
around the seed pixel. This value can be a single value or a triplet. This will override
the tolerance variable.
* *fixed_range* - If fixed_range is true we use the seed_pixel +/- tolerance
If fixed_range is false, the tolerance is +/- tolerance of the values of
the adjacent pixels to the pixel under test.
* *minsize* - The minimum size of the returned blobs.
* *maxsize* - The maximum size of the returned blobs, if none is specified we peg
this to the image size.
**RETURNS**
A featureset of blobs. If no blobs are found None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> blerbs = img.findFloodFillBlobs(((10,10),(20,20),(30,30)),tolerance=30)
>>> blerbs.show()
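A sketch using the lower/upper form and a size cutoff (all values are illustrative):
>>> blerbs = img.findFloodFillBlobs(((10,10),),lower=5,upper=40,minsize=50)
>>> if blerbs is not None:
>>> blerbs.show()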
**SEE ALSO**
:py:meth:`findBlobs`
:py:meth:`floodFill`
"""
mask = self.floodFillToMask(points,tolerance,color=Color.WHITE,lower=lower,upper=upper,fixed_range=fixed_range)
return self.findBlobsFromMask(mask,minsize=minsize,maxsize=maxsize)
def _doDFT(self, grayscale=False):
"""
**SUMMARY**
This private method performs the discrete Fourier transform on an input image.
The transform can be applied to a single channel gray image or to each channel of the
image. Each channel generates a 64F 2 channel IPL image corresponding to the real
and imaginary components of the DFT. A list of these IPL images is then cached
in the private member variable _DFT.
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
nothing - but creates a locally cached list of IPL images corresponding to the real
and imaginary components of each channel.
**EXAMPLE**
>>> img = Image('logo.png')
>>> img._doDFT()
>>> img._DFT[0] # get the b channel Re/Im components
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**TO DO**
This method really needs to convert the image to an optimal DFT size.
http://opencv.itseez.com/modules/core/doc/operations_on_arrays.html#getoptimaldftsize
"""
if( grayscale and (len(self._DFT) == 0 or len(self._DFT) == 3)):
self._DFT = []
img = self._getGrayscaleBitmap()
width, height = cv.GetSize(img)
src = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
dst = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
data = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
cv.ConvertScale(img,data,1.0)
cv.Zero(blank)
cv.Merge(data,blank,None,None,src)
cv.Merge(data,blank,None,None,dst)
cv.DFT(src, dst, cv.CV_DXT_FORWARD)
self._DFT.append(dst)
elif( not grayscale and (len(self._DFT) < 2 )):
self._DFT = []
r = self.getEmpty(1)
g = self.getEmpty(1)
b = self.getEmpty(1)
cv.Split(self.getBitmap(),b,g,r,None)
chans = [b,g,r]
width = self.width
height = self.height
data = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 1)
src = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
for c in chans:
dst = cv.CreateImage((width, height), cv.IPL_DEPTH_64F, 2)
cv.ConvertScale(c,data,1.0)
cv.Zero(blank)
cv.Merge(data,blank,None,None,src)
cv.Merge(data,blank,None,None,dst)
cv.DFT(src, dst, cv.CV_DXT_FORWARD)
self._DFT.append(dst)
def _getDFTClone(self,grayscale=False):
"""
**SUMMARY**
This method works just like _doDFT but returns a deep copy
of the resulting array which can be used in destructive operations.
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
A deep copy of the cached DFT real/imaginary image list.
**EXAMPLE**
>>> img = Image('logo.png')
>>> myDFT = img._getDFTClone()
>>> SomeCVFunc(myDFT[0])
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
ImageClass._doDFT()
"""
# this needs to be switched to the optimal
# DFT size for faster processing.
self._doDFT(grayscale)
retVal = []
if(grayscale):
gs = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_64F,2)
cv.Copy(self._DFT[0],gs)
retVal.append(gs)
else:
for img in self._DFT:
temp = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_64F,2)
cv.Copy(img,temp)
retVal.append(temp)
return retVal
def rawDFTImage(self,grayscale=False):
"""
**SUMMARY**
This method returns the **RAW** DFT transform of an image as a list of IPL Images.
Each result image is a two channel 64f image where the first channel is the real
component and the second channel is the imaginary component. If the operation
is performed on an RGB image and grayscale is False the result is a list of
these images of the form [b,g,r].
**PARAMETERS**
* *grayscale* - If grayscale is True we first convert the image to grayscale, otherwise
we perform the operation on each channel.
**RETURNS**
A list of the DFT images (see above). Note that this is a shallow copy operation.
**EXAMPLE**
>>> img = Image('logo.png')
>>> myDFT = img.rawDFTImage()
>>> for c in myDFT:
>>> #do some operation on the DFT
**NOTES**
http://en.wikipedia.org/wiki/Discrete_Fourier_transform
http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
self._doDFT(grayscale)
return self._DFT
def getDFTLogMagnitude(self,grayscale=False):
"""
**SUMMARY**
This method returns the log value of the magnitude image of the DFT transform. This
method is helpful for examining and comparing the results of DFT transforms. The log
component helps to "squish" the large floating point values into an image that can
be rendered easily.
In the image the low frequency components are in the corners of the image and the high
frequency components are in the center of the image.
**PARAMETERS**
* *grayscale* - if grayscale is True we perform the magnitude operation of the grayscale
image otherwise we perform the operation on each channel.
**RETURNS**
Returns a SimpleCV image corresponding to the log magnitude of the input image.
**EXAMPLE**
>>> img = Image("RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.lowPassFilter(0.1,0.1)
>>> lpf.getDFTLogMagnitude().show()
**NOTES**
* http://en.wikipedia.org/wiki/Discrete_Fourier_transform
* http://math.stackexchange.com/questions/1002/fourier-transform-for-dummies
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
dft = self._getDFTClone(grayscale)
chans = []
if( grayscale ):
chans = [self.getEmpty(1)]
else:
chans = [self.getEmpty(1),self.getEmpty(1),self.getEmpty(1)]
data = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(chans)):
cv.Split(dft[i],data,blank,None,None)
cv.Pow( data, data, 2.0)
cv.Pow( blank, blank, 2.0)
cv.Add( data, blank, data, None)
cv.Pow( data, data, 0.5 )
cv.AddS( data, cv.ScalarAll(1.0), data, None ) # 1 + Mag
cv.Log( data, data ) # log(1 + Mag)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
cv.Scale(data, data, 1.0/(max-min), 1.0*(-min)/(max-min))
cv.Mul(data,data,data,255.0)
cv.Convert(data,chans[i])
retVal = None
if( grayscale ):
retVal = Image(chans[0])
else:
retVal = self.getEmpty()
cv.Merge(chans[0],chans[1],chans[2],None,retVal)
retVal = Image(retVal)
return retVal
def _boundsFromPercentage(self, floatVal, bound):
return np.clip(int(floatVal*bound),0,bound)
def applyDFTFilter(self,flt,grayscale=False):
"""
**SUMMARY**
This function allows you to apply an arbitrary filter to the DFT of an image.
This filter takes in a gray scale image, whiter values are kept and black values
are rejected. In the DFT image, the lower frequency values are in the corners
of the image, while the higher frequency components are in the center. For example,
a low pass filter has white squares in the corners and is black everywhere else.
**PARAMETERS**
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
* *flt* - A grayscale filter image. The size of the filter must match the size of
the image.
**RETURNS**
A SimpleCV image after applying the filter.
**EXAMPLE**
>>> filter = Image("MyFilter.png")
>>> myImage = Image("MyImage.png")
>>> result = myImage.applyDFTFilter(filter)
>>> result.show()
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
**TODO**
Make this function support a separate filter image for each channel.
"""
if isinstance(flt, DFT):
filteredimage = flt.applyFilter(self, grayscale)
return filteredimage
if( flt.width != self.width or
flt.height != self.height ):
logger.warning("Image.applyDFTFilter - Your filter must match the size of the image")
dft = []
if( grayscale ):
dft = self._getDFTClone(grayscale)
flt = flt._getGrayscaleBitmap()
flt64f = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_64F,1)
cv.ConvertScale(flt,flt64f,1.0)
finalFilt = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_64F,2)
cv.Merge(flt64f,flt64f,None,None,finalFilt)
for d in dft:
cv.MulSpectrums(d,finalFilt,d,0)
else: #break down the filter and then do each channel
dft = self._getDFTClone(grayscale)
flt = flt.getBitmap()
b = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
g = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
r = cv.CreateImage((flt.width,flt.height),cv.IPL_DEPTH_8U,1)
cv.Split(flt,b,g,r,None)
chans = [b,g,r]
for c in range(0,len(chans)):
flt64f = cv.CreateImage((chans[c].width,chans[c].height),cv.IPL_DEPTH_64F,1)
cv.ConvertScale(chans[c],flt64f,1.0)
finalFilt = cv.CreateImage((chans[c].width,chans[c].height),cv.IPL_DEPTH_64F,2)
cv.Merge(flt64f,flt64f,None,None,finalFilt)
cv.MulSpectrums(dft[c],finalFilt,dft[c],0)
return self._inverseDFT(dft)
def _boundsFromPercentage(self, floatVal, bound):
return np.clip(int(floatVal*(bound/2.00)),0,(bound/2))
def highPassFilter(self, xCutoff,yCutoff=None,grayscale=False):
"""
**SUMMARY**
This method applies a high pass DFT filter. This filter enhances
the high frequencies and removes the low frequency signals. This has
the effect of enhancing edges. The frequencies are defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoff* - The horizontal frequency at which we perform the cutoff. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero to one,
where zero is constant component and 1 is the highest possible
frequency in the image.
* *yCutoff* - The cutoff frequencies in the y direction. If none are provided
we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> hpf = img.highPassFilter([0.2,0.1,0.2])
>>> hpf.show()
>>> hpf.getDFTLogMagnitude().show()
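A sketch of a single grayscale cutoff (0.3 is an arbitrary fraction of the frequency range):
>>> hpf_gray = img.highPassFilter(0.3,grayscale=True)
>>> hpf_gray.show()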
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
* See: http://en.wikipedia.org/wiki/Ringing_(signal)
* See: http://en.wikipedia.org/wiki/High-pass_filter#Image
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoff,float) ):
xCutoff = [xCutoff,xCutoff,xCutoff]
if( isinstance(yCutoff,float) ):
yCutoff = [yCutoff,yCutoff,yCutoff]
if(yCutoff is None):
yCutoff = [xCutoff[0],xCutoff[1],xCutoff[2]]
for i in range(0,len(xCutoff)):
xCutoff[i] = self._boundsFromPercentage(xCutoff[i],self.width)
yCutoff[i] = self._boundsFromPercentage(yCutoff[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
cv.AddS(filter,255,filter) # make everything white
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoff[0],yCutoff[0]),(0,0,0),thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoff[0]),(xCutoff[0],h),(0,0,0),thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoff[0],0),(w,yCutoff[0]),(0,0,0),thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoff[0],h-yCutoff[0]),(w,h),(0,0,0),thickness=-1) #BR
else:
# I need to look into CVMERGE/SPLIT... I would really like to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
cv.AddS(filterB,255,filterB) # make everything white
cv.AddS(filterG,255,filterG) # make everything white
cv.AddS(filterR,255,filterR) # make everything white
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoff[i],yCutoff[i]),0,thickness=-1)
cv.Rectangle(f,(0,h-yCutoff[i]),(xCutoff[i],h),0,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],0),(w,yCutoff[i]),0,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],h-yCutoff[i]),(w,h),0,thickness=-1)
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
def lowPassFilter(self, xCutoff,yCutoff=None,grayscale=False):
"""
**SUMMARY**
This method applies a low pass DFT filter. This filter enhances
the low frequencies and removes the high frequency signals. This has
the effect of reducing noise. The frequencies are defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoff* - The horizontal frequency at which we perform the cutoff. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero to one,
where zero is constant component and 1 is the highest possible
frequency in the image.
* *yCutoff* - The cutoff frequencies in the y direction. If none are provided
we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.lowPassFilter([0.2,0.2,0.05])
>>> lpf.show()
>>> lpf.getDFTLogMagnitude().show()
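A sketch of a single symmetric cutoff (0.1 is an arbitrary fraction of the frequency range):
>>> smooth = img.lowPassFilter(0.1,0.1)
>>> smooth.show()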
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
See: http://en.wikipedia.org/wiki/Ringing_(signal)
See: http://en.wikipedia.org/wiki/Low-pass_filter
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoff,float) ):
xCutoff = [xCutoff,xCutoff,xCutoff]
if( isinstance(yCutoff,float) ):
yCutoff = [yCutoff,yCutoff,yCutoff]
if(yCutoff is None):
yCutoff = [xCutoff[0],xCutoff[1],xCutoff[2]]
for i in range(0,len(xCutoff)):
xCutoff[i] = self._boundsFromPercentage(xCutoff[i],self.width)
yCutoff[i] = self._boundsFromPercentage(yCutoff[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoff[0],yCutoff[0]),255,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoff[0]),(xCutoff[0],h),255,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoff[0],0),(w,yCutoff[0]),255,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoff[0],h-yCutoff[0]),(w,h),255,thickness=-1) #BR
else:
# I need to look into CVMERGE/SPLIT... I would really like to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoff[i],yCutoff[i]),255,thickness=-1)
cv.Rectangle(f,(0,h-yCutoff[i]),(xCutoff[i],h),255,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],0),(w,yCutoff[i]),255,thickness=-1)
cv.Rectangle(f,(w-xCutoff[i],h-yCutoff[i]),(w,h),255,thickness=-1)
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
# TODO: need to decide BGR or RGB ordering for the per-channel cutoffs
# ((rx_begin,ry_begin)(gx_begin,gy_begin)(bx_begin,by_begin))
# or (x,y)
def bandPassFilter(self, xCutoffLow, xCutoffHigh, yCutoffLow=None, yCutoffHigh=None,grayscale=False):
"""
**SUMMARY**
This method applies a simple band pass DFT filter. This filter enhances
a range of frequencies and removes all of the other frequencies. This allows
a user to precisely select a set of signals to display. The frequencies are
defined as going between
0.00 and 1.00, where 0 is the lowest frequency in the image and 1.0 is
the highest possible frequency. Each of the frequencies is defined
with respect to the horizontal and vertical signal. This filter
isn't perfect and has a harsh cutoff that causes ringing artifacts.
**PARAMETERS**
* *xCutoffLow* - The horizontal frequency at which we perform the cutoff of the low
frequency signals. A separate
frequency can be used for the b,g, and r signals by providing a
list of values. The frequency is defined between zero to one,
where zero is constant component and 1 is the highest possible
frequency in the image.
* *xCutoffHigh* - The horizontal frequency at which we perform the cutoff of the high
frequency signals. Our filter passes signals between xCutoffLow and
xCutoffHigh. A separate frequency can be used for the b, g, and r
channels by providing a
list of values. The frequency is defined between zero to one,
where zero is constant component and 1 is the highest possible
frequency in the image.
* *yCutoffLow* - The low frequency cutoff in the y direction. If none
are provided we use the same values as provided for x.
* *yCutoffHigh* - The high frequency cutoff in the y direction. If none
are provided we use the same values as provided for x.
* *grayscale* - if this value is True we perform the operation on the DFT of the gray
version of the image and the result is a gray image. If grayscale is False
we perform the operation on each channel and then recombine them to create
the result.
**RETURNS**
A SimpleCV Image after applying the filter.
**EXAMPLE**
>>> img = Image("SimpleCV/sampleimages/RedDog2.jpg")
>>> img.getDFTLogMagnitude().show()
>>> lpf = img.bandPassFilter([0.2,0.2,0.05],[0.3,0.3,0.2])
>>> lpf.show()
>>> lpf.getDFTLogMagnitude().show()
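A sketch of a grayscale band pass using single float cutoffs (the band limits are arbitrary):
>>> band = img.bandPassFilter(0.1,0.3,grayscale=True)
>>> band.getDFTLogMagnitude().show()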
**NOTES**
This filter is far from perfect and will generate a lot of ringing artifacts.
See: http://en.wikipedia.org/wiki/Ringing_(signal)
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if( isinstance(xCutoffLow,float) ):
xCutoffLow = [xCutoffLow,xCutoffLow,xCutoffLow]
if( isinstance(yCutoffLow,float) ):
yCutoffLow = [yCutoffLow,yCutoffLow,yCutoffLow]
if( isinstance(xCutoffHigh,float) ):
xCutoffHigh = [xCutoffHigh,xCutoffHigh,xCutoffHigh]
if( isinstance(yCutoffHigh,float) ):
yCutoffHigh = [yCutoffHigh,yCutoffHigh,yCutoffHigh]
if(yCutoffLow is None):
yCutoffLow = [xCutoffLow[0],xCutoffLow[1],xCutoffLow[2]]
if(yCutoffHigh is None):
yCutoffHigh = [xCutoffHigh[0],xCutoffHigh[1],xCutoffHigh[2]]
for i in range(0,len(xCutoffLow)):
xCutoffLow[i] = self._boundsFromPercentage(xCutoffLow[i],self.width)
xCutoffHigh[i] = self._boundsFromPercentage(xCutoffHigh[i],self.width)
yCutoffHigh[i] = self._boundsFromPercentage(yCutoffHigh[i],self.height)
yCutoffLow[i] = self._boundsFromPercentage(yCutoffLow[i],self.height)
filter = None
h = self.height
w = self.width
if( grayscale ):
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filter)
#now make all of the corners black
cv.Rectangle(filter,(0,0),(xCutoffHigh[0],yCutoffHigh[0]),255,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoffHigh[0]),(xCutoffHigh[0],h),255,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoffHigh[0],0),(w,yCutoffHigh[0]),255,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoffHigh[0],h-yCutoffHigh[0]),(w,h),255,thickness=-1) #BR
cv.Rectangle(filter,(0,0),(xCutoffLow[0],yCutoffLow[0]),0,thickness=-1) #TL
cv.Rectangle(filter,(0,h-yCutoffLow[0]),(xCutoffLow[0],h),0,thickness=-1) #BL
cv.Rectangle(filter,(w-xCutoffLow[0],0),(w,yCutoffLow[0]),0,thickness=-1) #TR
cv.Rectangle(filter,(w-xCutoffLow[0],h-yCutoffLow[0]),(w,h),0,thickness=-1) #BR
else:
# I need to look into CVMERGE/SPLIT... I would really like to know
# how much memory we're allocating here
filterB = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterG = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
filterR = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,1)
cv.Zero(filterB)
cv.Zero(filterG)
cv.Zero(filterR)
#now make all of the corners black
temp = [filterB,filterG,filterR]
i = 0
for f in temp:
cv.Rectangle(f,(0,0),(xCutoffHigh[i],yCutoffHigh[i]),255,thickness=-1) #TL
cv.Rectangle(f,(0,h-yCutoffHigh[i]),(xCutoffHigh[i],h),255,thickness=-1) #BL
cv.Rectangle(f,(w-xCutoffHigh[i],0),(w,yCutoffHigh[i]),255,thickness=-1) #TR
cv.Rectangle(f,(w-xCutoffHigh[i],h-yCutoffHigh[i]),(w,h),255,thickness=-1) #BR
cv.Rectangle(f,(0,0),(xCutoffLow[i],yCutoffLow[i]),0,thickness=-1) #TL
cv.Rectangle(f,(0,h-yCutoffLow[i]),(xCutoffLow[i],h),0,thickness=-1) #BL
cv.Rectangle(f,(w-xCutoffLow[i],0),(w,yCutoffLow[i]),0,thickness=-1) #TR
cv.Rectangle(f,(w-xCutoffLow[i],h-yCutoffLow[i]),(w,h),0,thickness=-1) #BR
i = i+1
filter = cv.CreateImage((self.width,self.height),cv.IPL_DEPTH_8U,3)
cv.Merge(filterB,filterG,filterR,None,filter)
scvFilt = Image(filter)
retVal = self.applyDFTFilter(scvFilt,grayscale)
return retVal
def _inverseDFT(self,input):
"""
**SUMMARY**
**PARAMETERS**
**RETURNS**
**EXAMPLE**
NOTES:
SEE ALSO:
"""
# a destructive IDFT operation for internal calls
w = input[0].width
h = input[0].height
if( len(input) == 1 ):
cv.DFT(input[0], input[0], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
cv.Split(input[0],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0)
cv.Convert(data,result)
retVal = Image(result)
else: # DO RGB separately
results = []
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(input)):
cv.DFT(input[i], input[i], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Split( input[i],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0) # this may not be right
cv.Convert(data,result)
results.append(result)
retVal = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,3)
cv.Merge(results[0],results[1],results[2],None,retVal)
retVal = Image(retVal)
del input
return retVal
def InverseDFT(self, raw_dft_image):
"""
**SUMMARY**
This method provides a way of performing an inverse discrete Fourier transform
on a real/imaginary image pair and obtaining the result as a SimpleCV image. This
method is helpful if you wish to perform custom filter development.
**PARAMETERS**
* *raw_dft_image* - A list object with either one or three IPL images. Each image should
have a 64f depth and contain two channels (the real and the imaginary).
**RETURNS**
A simpleCV image.
**EXAMPLE**
Note that this is an example, I don't recommend doing this unless you know what
you are doing.
>>> raw = img.rawDFTImage()
>>> cv.SomeOperation(raw)
>>> result = img.InverseDFT(raw)
>>> result.show()
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
input = []
w = raw_dft_image[0].width
h = raw_dft_image[0].height
if(len(raw_dft_image) == 1):
gs = cv.CreateImage((w,h),cv.IPL_DEPTH_64F,2)
cv.Copy(raw_dft_image[0],gs)
input.append(gs)
else:
for img in raw_dft_image:
temp = cv.CreateImage((w,h),cv.IPL_DEPTH_64F,2)
cv.Copy(img,temp)
input.append(temp)
if( len(input) == 1 ):
cv.DFT(input[0], input[0], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
cv.Split(input[0],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0)
cv.Convert(data,result)
retVal = Image(result)
else: # DO RGB separately
results = []
data = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
blank = cv.CreateImage((w,h), cv.IPL_DEPTH_64F, 1)
for i in range(0,len(raw_dft_image)):
cv.DFT(input[i], input[i], cv.CV_DXT_INV_SCALE)
result = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Split( input[i],data,blank,None,None)
min, max, pt1, pt2 = cv.MinMaxLoc(data)
denom = max-min
if(denom == 0):
denom = 1
cv.Scale(data, data, 1.0/(denom), 1.0*(-min)/(denom))
cv.Mul(data,data,data,255.0) # this may not be right
cv.Convert(data,result)
results.append(result)
retVal = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,3)
cv.Merge(results[0],results[1],results[2],None,retVal)
retVal = Image(retVal)
return retVal
def applyButterworthFilter(self,dia=400,order=2,highpass=False,grayscale=False):
"""
**SUMMARY**
Creates a Butterworth filter of 64x64 pixels, resizes it to fit
the image, and applies it to the image in the frequency domain.
Returns the filtered image.
**PARAMETERS**
* *dia* - int Diameter of Butterworth low pass filter
* *order* - int Order of butterworth lowpass filter
* *highpass*: BOOL True: highpass filter, False: lowpass filter
* *grayscale*: BOOL
**EXAMPLE**
>>> im = Image("lenna")
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=True,grayscale=False)
Output image: http://i.imgur.com/5LS3e.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=False,grayscale=False)
Output img: http://i.imgur.com/QlCAY.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=True,grayscale=True)
Output img: http://i.imgur.com/BYYnp.png
>>> img = im.applyButterworthFilter(dia=400,order=2,highpass=False,grayscale=True)
Output img: http://i.imgur.com/BYYnp.png
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
#reimplemented with faster, vectorized filter kernel creation
w,h = self.size()
intensity_scale = 2**8 - 1 #for now 8-bit
sz_x = 64 #for now constant, symmetric
sz_y = 64 #for now constant, symmetric
x0 = sz_x/2.0 #for now, on center
y0 = sz_y/2.0 #for now, on center
#efficient "vectorized" computation
X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
D = np.sqrt((X-x0)**2+(Y-y0)**2)
flt = intensity_scale/(1.0 + (D/dia)**(order*2))
if highpass: #then invert the filter
flt = intensity_scale - flt
flt = Image(flt) #numpy arrays are in row-major form...doesn't matter for symmetric filter
flt_re = flt.resize(w,h)
img = self.applyDFTFilter(flt_re,grayscale)
return img
def applyGaussianFilter(self, dia=400, highpass=False, grayscale=False):
"""
**SUMMARY**
Creates a Gaussian filter of 64x64 pixels, resizes it to fit
the image, and applies it to the image in the frequency domain.
Returns the filtered image.
**PARAMETERS**
* *dia* - int - diameter of Gaussian filter
* *highpass*: BOOL True: highpass filter, False: lowpass filter
* *grayscale*: BOOL
**EXAMPLE**
>>> im = Image("lenna")
>>> img = im.applyGaussianFilter(dia=400,highpass=True,grayscale=False)
Output image: http://i.imgur.com/DttJv.png
>>> img = im.applyGaussianFilter(dia=400,highpass=False,grayscale=False)
Output img: http://i.imgur.com/PWn4o.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyGaussianFilter(dia=400,highpass=True,grayscale=True)
Output img: http://i.imgur.com/9hX5J.png
>>> img = im.applyGaussianFilter(dia=400,highpass=False,grayscale=True)
Output img: http://i.imgur.com/MXI5T.png
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
#reimplemented with faster, vectorized filter kernel creation
w,h = self.size()
intensity_scale = 2**8 - 1 #for now 8-bit
sz_x = 64 #for now constant, symmetric
sz_y = 64 #for now constant, symmetric
x0 = sz_x/2.0 #for now, on center
y0 = sz_y/2.0 #for now, on center
#efficient "vectorized" computation
X, Y = np.meshgrid(np.arange(sz_x), np.arange(sz_y))
D = np.sqrt((X-x0)**2+(Y-y0)**2)
flt = intensity_scale*np.exp(-0.5*(D/dia)**2)
if highpass: #then invert the filter
flt = intensity_scale - flt
flt = Image(flt) #numpy arrays are in row-major form...doesn't matter for symmetric filter
flt_re = flt.resize(w,h)
img = self.applyDFTFilter(flt_re,grayscale)
return img
def applyUnsharpMask(self,boost=1,dia=400,grayscale=False):
"""
**SUMMARY**
This method applies unsharp masking or highboost filtering
to the image, depending upon the boost value provided.
A Gaussian low pass filter is applied to the image in the frequency domain.
A mask is created by subtracting the low pass result from the original
image. The mask is then added back to the image to sharpen it.
unsharp masking => image + mask
highboost filtering => image + (boost)*mask
**PARAMETERS**
* *boost* - int boost = 1 => unsharp masking, boost > 1 => highboost filtering
* *dia* - int Diameter of Gaussian low pass filter
* *grayscale* - BOOL
**EXAMPLE**
Gaussian Filters:
>>> im = Image("lenna")
>>> img = im.applyUnsharpMask(2,grayscale=False) #highboost filtering
output image: http://i.imgur.com/A1pZf.png
>>> img = im.applyUnsharpMask(1,grayscale=False) #unsharp masking
output image: http://i.imgur.com/smCdL.png
>>> im = Image("grayscale_lenn.png") #take image from here: http://i.imgur.com/O0gZn.png
>>> img = im.applyUnsharpMask(2,grayscale=True) #highboost filtering
output image: http://i.imgur.com/VtGzl.png
>>> img = im.applyUnsharpMask(1,grayscale=True) #unsharp masking
output image: http://i.imgur.com/bywny.png
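A sketch combining a smaller filter diameter with highboost filtering (both values are arbitrary):
>>> sharp = im.applyUnsharpMask(boost=3,dia=300)
>>> sharp.show()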
**SEE ALSO**
:py:meth:`rawDFTImage`
:py:meth:`getDFTLogMagnitude`
:py:meth:`applyDFTFilter`
:py:meth:`highPassFilter`
:py:meth:`lowPassFilter`
:py:meth:`bandPassFilter`
:py:meth:`InverseDFT`
:py:meth:`applyButterworthFilter`
:py:meth:`InverseDFT`
:py:meth:`applyGaussianFilter`
:py:meth:`applyUnsharpMask`
"""
if boost < 0:
print "boost must be >= 0"
return None
lpIm = self.applyGaussianFilter(dia=dia,grayscale=grayscale,highpass=False)
im = Image(self.getBitmap())
mask = im - lpIm
img = im
for i in range(boost):
img = img + mask
return img
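# A minimal sketch of the unsharp/highboost arithmetic above, assuming plain numpy
# float arrays rather than SimpleCV Images:
#
#   import numpy as np
#   def unsharp(img, lowpass, boost=1):
#       mask = img - lowpass                    # high-frequency detail removed by the lowpass
#       return np.clip(img + boost * mask, 0, 255)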
def listHaarFeatures(self):
'''
This is used to list the built in features available for HaarCascade feature
detection. Just run this function as:
>>> img.listHaarFeatures()
Then use one of the file names returned as the input to the findHaarFeature()
function. So you should get a list, more than likely you will see face.xml,
to use it then just
>>> img.findHaarFeatures('face.xml')
'''
features_directory = os.path.join(LAUNCH_PATH, 'Features','HaarCascades')
features = os.listdir(features_directory)
print features
def _CopyAvg(self, src, dst,roi, levels, levels_f, mode):
'''
Take the value in an ROI, calculate the average / peak hue
and then set the output image roi to the value.
'''
if( mode ): # get the peak hue for an area
h = src[roi[0]:roi[0]+roi[2],roi[1]:roi[1]+roi[3]].hueHistogram()
myHue = np.argmax(h)
c = (float(myHue),float(255),float(255),float(0))
cv.SetImageROI(dst,roi)
cv.AddS(dst,c,dst)
cv.ResetImageROI(dst)
else: # get the average value for an area optionally set levels
cv.SetImageROI(src.getBitmap(),roi)
cv.SetImageROI(dst,roi)
avg = cv.Avg(src.getBitmap())
avg = (float(avg[0]),float(avg[1]),float(avg[2]),0)
if(levels is not None):
avg = (int(avg[0]/levels)*levels_f,int(avg[1]/levels)*levels_f,int(avg[2]/levels)*levels_f,0)
cv.AddS(dst,avg,dst)
cv.ResetImageROI(src.getBitmap())
cv.ResetImageROI(dst)
def pixelize(self, block_size = 10, region = None, levels=None, doHue=False):
"""
**SUMMARY**
Pixelation blur, like the kind used to hide naughty bits on your favorite tv show.
**PARAMETERS**
* *block_size* - the blur block size in pixels; an integer gives a square blur, a tuple gives a rectangular one.
* *region* - do the blur in a region in format (x_position,y_position,width,height)
* *levels* - the number of levels per color channel. This makes the image look like an 8-bit video game.
* *doHue* - If this value is true we calculate the peak hue for the area, not the
average color for the area.
**RETURNS**
Returns the image with the pixelation blur applied.
**EXAMPLE**
>>> img = Image("lenna")
>>> result = img.pixelize( 16, (200,180,250,250), levels=4)
>>> img.show()
"""
if( isinstance(block_size, int) ):
block_size = (block_size,block_size)
retVal = self.getEmpty()
levels_f = 0.00
if( levels is not None ):
levels = 255/int(levels)
if(levels <= 1 ):
levels = 2
levels_f = float(levels)
if( region is not None ):
cv.Copy(self.getBitmap(), retVal)
cv.SetImageROI(retVal,region)
cv.Zero(retVal)
cv.ResetImageROI(retVal)
xs = region[0]
ys = region[1]
w = region[2]
h = region[3]
else:
xs = 0
ys = 0
w = self.width
h = self.height
#if( region is None ):
hc = w / block_size[0] #number of horizontal blocks
vc = h / block_size[1] #number of vertical blocks
#when we fit in the blocks, we're going to spread the round off
#over the edges 0->x_0, 0->y_0 and x_0+hc*block_size
x_lhs = int(np.ceil(float(w%block_size[0])/2.0)) # this is the starting point
y_lhs = int(np.ceil(float(h%block_size[1])/2.0))
x_rhs = int(np.floor(float(w%block_size[0])/2.0)) # this is the starting point
y_rhs = int(np.floor(float(h%block_size[1])/2.0))
x_0 = xs+x_lhs
y_0 = ys+y_lhs
x_f = (x_0+(block_size[0]*hc)) #this would be the end point
y_f = (y_0+(block_size[1]*vc))
for i in range(0,hc):
for j in range(0,vc):
xt = x_0+(block_size[0]*i)
yt = y_0+(block_size[1]*j)
roi = (xt,yt,block_size[0],block_size[1])
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( x_lhs > 0 ): # add a left strip
xt = xs
wt = x_lhs
ht = block_size[1]
for j in range(0,vc):
yt = y_0+(j*block_size[1])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( x_rhs > 0 ): # add a right strip
xt = (x_0+(block_size[0]*hc))
wt = x_rhs
ht = block_size[1]
for j in range(0,vc):
yt = y_0+(j*block_size[1])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( y_lhs > 0 ): # add a left strip
yt = ys
ht = y_lhs
wt = block_size[0]
for i in range(0,hc):
xt = x_0+(i*block_size[0])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if( y_rhs > 0 ): # add a right strip
yt = (y_0+(block_size[1]*vc))
ht = y_rhs
wt = block_size[0]
for i in range(0,hc):
xt = x_0+(i*block_size[0])
roi = (xt,yt,wt,ht)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
#now the corner cases
if(x_lhs > 0 and y_lhs > 0 ):
roi = (xs,ys,x_lhs,y_lhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_rhs > 0 and y_rhs > 0 ):
roi = (x_f,y_f,x_rhs,y_rhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_lhs > 0 and y_rhs > 0 ):
roi = (xs,y_f,x_lhs,y_rhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(x_rhs > 0 and y_lhs > 0 ):
roi = (x_f,ys,x_rhs,y_lhs)
self._CopyAvg(self,retVal,roi,levels,levels_f,doHue)
if(doHue):
cv.CvtColor(retVal,retVal,cv.CV_HSV2BGR)
return Image(retVal)
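# A minimal sketch of the block-averaging idea behind pixelize, assuming a plain
# numpy HxWx3 array whose dimensions are exact multiples of the block size:
#
#   import numpy as np
#   def pixelate_blocks(arr, bs):
#       h, w, c = arr.shape
#       blocks = arr.reshape(h // bs, bs, w // bs, bs, c).mean(axis=(1, 3))
#       return np.repeat(np.repeat(blocks, bs, axis=0), bs, axis=1).astype(np.uint8)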
def anonymize(self, block_size=10, features=None, transform=None):
"""
**SUMMARY**
Anonymize, for additional privacy to images.
**PARAMETERS**
* *features* - A list with the Haar like feature cascades that should be matched.
* *block_size* - The size of the blocks for the pixelize function.
* *transform* - A function, to be applied to the regions matched instead of pixelize.
* This function must take two arguments: the image and the region it'll be applied to,
* as in region = (x, y, width, height).
**RETURNS**
Returns the image with matching regions pixelated.
**EXAMPLE**
>>> img = Image("lenna")
>>> anonymous = img.anonymize()
>>> anonymous.show()
>>> def my_function(img, region):
>>> x, y, width, height = region
>>> img = img.crop(x, y, width, height)
>>> return img
>>>
>>>img = Image("lenna")
>>>transformed = img.anonymize(transform = my_function)
"""
regions = []
if features is None:
regions.append(self.findHaarFeatures("face"))
regions.append(self.findHaarFeatures("profile"))
else:
for feature in features:
regions.append(self.findHaarFeatures(feature))
found = [f for f in regions if f is not None]
img = self.copy()
if found:
for feature_set in found:
for region in feature_set:
rect = (region.topLeftCorner()[0], region.topLeftCorner()[1],
region.width(), region.height())
if transform is None:
img = img.pixelize(block_size=block_size, region=rect)
else:
img = transform(img, rect)
return img
def fillHoles(self):
"""
**SUMMARY**
Fill holes on a binary image by closing the contours
**PARAMETERS**
* *img* - a binary image
**RETURNS**
The image with the holes filled
**EXAMPLE**
>>> img = Image("SimpleCV")
#todo Add noise and showcase the image
"""
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
# res = cv2.morphologyEx(self.getGrayNumpy(),cv2.MORPH_OPEN,kernel)
# return res
des = cv2.bitwise_not(self.getGrayNumpy())
contour,hier = cv2.findContours(des,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
cv2.drawContours(des,[cnt],0,255,-1) # fill each contour to close its holes
gray = cv2.bitwise_not(des)
return gray
def edgeIntersections(self, pt0, pt1, width=1, canny1=0, canny2=100):
"""
**SUMMARY**
Find the outermost intersection of a line segment and the edge image and return
a list of the intersection points. If no intersections are found the method returns
an empty list.
**PARAMETERS**
* *pt0* - an (x,y) tuple of one point on the intersection line.
* *pt1* - an (x,y) tuple of the second point on the intersection line.
* *width* - the width of the line to use. A wider line works better
for cases where the edges of an object are not always closed
and may have holes.
* *canny1* - the lower bound of the Canny edge detector parameters.
* *canny2* - the upper bound of the Canny edge detector parameters.
**RETURNS**
A list of two (x,y) tuples or an empty list.
**EXAMPLE**
>>> img = Image("SimpleCV")
>>> a = (25,100)
>>> b = (225,110)
>>> pts = img.edgeIntersections(a,b,width=3)
>>> e = img.edges(0,100)
>>> e.drawLine(a,b,color=Color.RED)
>>> e.drawCircle(pts[0],10,color=Color.GREEN)
>>> e.drawCircle(pts[1],10,color=Color.GREEN)
>>> e.show()
img = Image("SimpleCV")
a = (25,100)
b = (225,100)
pts = img.edgeIntersections(a,b,width=3)
e = img.edges(0,100)
e.drawLine(a,b,color=Color.RED)
e.drawCircle(pts[0],10,color=Color.GREEN)
e.drawCircle(pts[1],10,color=Color.GREEN)
e.show()
"""
w = abs(pt0[0]-pt1[0])
h = abs(pt0[1]-pt1[1])
x = np.min([pt0[0],pt1[0]])
y = np.min([pt0[1],pt1[1]])
if( w <= 0 ):
w = width
x = np.clip(x-(width/2),0,x-(width/2))
if( h <= 0 ):
h = width
y = np.clip(y-(width/2),0,y-(width/2))
#got some corner cases to catch here
p0p = np.array([(pt0[0]-x,pt0[1]-y)])
p1p = np.array([(pt1[0]-x,pt1[1]-y)])
edges = self.crop(x,y,w,h)._getEdgeMap(canny1, canny2)
line = cv.CreateImage((w,h),cv.IPL_DEPTH_8U,1)
cv.Zero(line)
cv.Line(line,((pt0[0]-x),(pt0[1]-y)),((pt1[0]-x),(pt1[1]-y)),cv.Scalar(255.00),width,8)
cv.Mul(line,edges,line)
intersections = np.uint8(np.array(cv.GetMat(line)).transpose())
(xs,ys) = np.where(intersections==255)
points = zip(xs,ys)
if(len(points)==0):
return [None,None]
A = np.argmin(spsd.cdist(p0p,points,'cityblock'))
B = np.argmin(spsd.cdist(p1p,points,'cityblock'))
ptA = (int(xs[A]+x),int(ys[A]+y))
ptB = (int(xs[B]+x),int(ys[B]+y))
# we might actually want this to be list of all the points
return [ptA, ptB]
def fitContour(self, initial_curve, window=(11,11), params=(0.1,0.1,0.1),doAppx=True,appx_level=1):
"""
**SUMMARY**
This method tries to fit a list of points to lines in the image. The list of points
is a list of (x,y) tuples that are near (i.e. within the window size) the line
you want to fit in the image. This method works on a binary image, such as the result
of calling edges().
This method is based on active contours. Please see this reference:
http://en.wikipedia.org/wiki/Active_contour_model
**PARAMETERS**
* *initial_curve* - region of the form [(x0,y0),(x1,y1)...] that are the initial conditions to fit.
* *window* - the search region around each initial point to look for a solution.
* *params* - The alpha, beta, and gamma parameters for the active contours
algorithm as a list [alpha,beta,gamma].
* *doAppx* - post process the snake into a polynomial approximation. Basically
this flag will clean up the output of the contour algorithm.
* *appx_level* - how much to approximate the snake, higher numbers mean more approximation.
**DISCUSSION**
THIS SECTION IS QUOTED FROM: http://users.ecs.soton.ac.uk/msn/book/new_demo/Snakes/
There are three components to the Energy Function:
* Continuity
* Curvature
* Image (Gradient)
Each Weighted by Specified Parameter:
Total Energy = Alpha*Continuity + Beta*Curvature + Gamma*Image
Choose different values dependent on Feature to extract:
* Set alpha high if there is a deceptive Image Gradient
* Set beta high if smooth edged Feature, low if sharp edges
* Set gamma high if contrast between Background and Feature is low
**RETURNS**
A list of (x,y) tuples that approximate the curve. If you do not use
approximation the list should be the same length as the input list length.
**EXAMPLE**
>>> img = Image("lenna")
>>> edges = img.edges(t1=120,t2=155)
>>> guess = [(311,284),(313,270),(320,259),(330,253),(347,245)]
>>> result = edges.fitContour(guess)
>>> img.drawPoints(guess,color=Color.RED)
>>> img.drawPoints(result,color=Color.GREEN)
>>> img.show()
"""
alpha = [params[0]]
beta= [params[1]]
gamma = [params[2]]
if( window[0]%2 == 0 ):
window = (window[0]+1,window[1])
logger.warn("Yo dawg, just a heads up, snakeFitPoints wants an odd window size. I fixed it for you, but you may want to take a look at your code.")
if( window[1]%2 == 0 ):
window = (window[0],window[1]+1)
logger.warn("Yo dawg, just a heads up, snakeFitPoints wants an odd window size. I fixed it for you, but you may want to take a look at your code.")
raw = cv.SnakeImage(self._getGrayscaleBitmap(),initial_curve,alpha,beta,gamma,window,(cv.CV_TERMCRIT_ITER,10,0.01))
if( doAppx ):
try:
import cv2
except:
logger.warning("Can't Do snakeFitPoints without OpenCV >= 2.3.0")
return
appx = cv2.approxPolyDP(np.array([raw],'float32'),appx_level,True)
retVal = []
for p in appx:
retVal.append((int(p[0][0]),int(p[0][1])))
else:
retVal = raw
return retVal
def fitEdge(self,guess,window=10,threshold=128, measurements=5, darktolight=True, lighttodark=True,departurethreshold=1):
"""
**SUMMARY**
Fit edge in a binary/gray image using an initial guess and the least squares method.
The function returns a single line
**PARAMETERS**
* *guess* - A tuples of the form ((x0,y0),(x1,y1)) which is an approximate guess
* *window* - A window around the guess to search.
* *threshold* - the threshold above which we count a pixel as a line
* *measurements* -the number of line projections to use for fitting the line
TODO: Constrain the fit to black-to-white or white-to-black transitions
and to right vs. left orientation.
**RETURNS**
A Line object.
**EXAMPLE**
"""
searchLines = FeatureSet()
fitPoints = FeatureSet()
x1 = guess[0][0]
x2 = guess[1][0]
y1 = guess[0][1]
y2 = guess[1][1]
dx = float((x2-x1))/(measurements-1)
dy = float((y2-y1))/(measurements-1)
s = np.zeros((measurements,2))
lpstartx = np.zeros(measurements)
lpstarty = np.zeros(measurements)
lpendx = np.zeros(measurements)
lpendy = np.zeros(measurements)
linefitpts = np.zeros((measurements,2))
#obtain equation for initial guess line
if( x1==x2): #vertical line must be handled as special case since slope isn't defined
m=0
mo = 0
b = x1
for i in xrange(0, measurements):
s[i][0] = x1
s[i][1] = y1 + i * dy
lpstartx[i] = s[i][0] + window
lpstarty[i] = s[i][1]
lpendx[i] = s[i][0] - window
lpendy[i] = s[i][1]
Cur_line = Line(self,((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])))
((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])) = Cur_line.cropToImageEdges().end_points
searchLines.append(Cur_line)
tmp = self.getThresholdCrossing((int(lpstartx[i]),int(lpstarty[i])),(int(lpendx[i]),int(lpendy[i])),threshold=threshold,lighttodark=lighttodark, darktolight=darktolight, departurethreshold=departurethreshold)
fitPoints.append(Circle(self,tmp[0],tmp[1],3))
linefitpts[i] = tmp
else:
m = float((y2-y1))/(x2-x1)
b = y1 - m*x1
mo = -1/m #slope of orthogonal line segments
#obtain points for measurement along the initial guess line
for i in xrange(0, measurements):
s[i][0] = x1 + i * dx
s[i][1] = y1 + i * dy
fx = (math.sqrt(math.pow(window,2))/(1+mo))/2
fy = fx * mo
lpstartx[i] = s[i][0] + fx
lpstarty[i] = s[i][1] + fy
lpendx[i] = s[i][0] - fx
lpendy[i] = s[i][1] - fy
Cur_line = Line(self,((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])))
((lpstartx[i],lpstarty[i]),(lpendx[i],lpendy[i])) = Cur_line.cropToImageEdges().end_points
searchLines.append(Cur_line)
tmp = self.getThresholdCrossing((int(lpstartx[i]),int(lpstarty[i])),(int(lpendx[i]),int(lpendy[i])),threshold=threshold,lighttodark=lighttodark, darktolight=darktolight,departurethreshold=departurethreshold)
fitPoints.append((tmp[0],tmp[1]))
linefitpts[i] = tmp
badpts = []
for j in range(len(linefitpts)):
if (linefitpts[j,0] == -1) or (linefitpts[j,1] == -1):
badpts.append(j)
if badpts: # delete all bad rows at once so indices do not shift
linefitpts = np.delete(linefitpts,badpts,axis=0)
x = linefitpts[:,0]
y = linefitpts[:,1]
ymin = np.min(y)
ymax = np.max(y)
xmax = np.max(x)
xmin = np.min(x)
if( (xmax-xmin) > (ymax-ymin) ):
# do the least squares
A = np.vstack([x,np.ones(len(x))]).T
m,c = nla.lstsq(A,y)[0]
y0 = int(m*xmin+c)
y1 = int(m*xmax+c)
finalLine = Line(self,((xmin,y0),(xmax,y1)))
else:
# do the least squares
A = np.vstack([y,np.ones(len(y))]).T
m,c = nla.lstsq(A,x)[0]
x0 = int(ymin*m+c)
x1 = int(ymax*m+c)
finalLine = Line(self,((x0,ymin),(x1,ymax)))
return finalLine, searchLines, fitPoints
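# A minimal sketch of the least-squares step above, assuming standalone numpy; it fits
# y = m*x + c to the detected crossing points in the same way nla.lstsq is used here:
#
#   import numpy as np
#   x = np.array([0.0, 1.0, 2.0, 3.0])
#   y = np.array([1.1, 2.9, 5.2, 7.1])
#   A = np.vstack([x, np.ones(len(x))]).T
#   m, c = np.linalg.lstsq(A, y)[0]   # slope and intercept of the fitted edge line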
def getThresholdCrossing(self, pt1, pt2, threshold=128, darktolight=True, lighttodark=True, departurethreshold=1):
"""
**SUMMARY**
This function takes in an image and two points, calculates the intensity
profile between the points, and returns the single point at which the profile
crosses the given intensity threshold.
**PARAMETERS**
* *pt1, pt2* - the starting and ending points in tuple form e.g. (1,2)
* *threshold* pixel value of desired threshold crossing
* *departurethreshold* - noise reduction technique. requires this many points to be above the threshold to trigger crossing
**RETURNS**
An (x,y) tuple of the point where the intensity profile crosses the threshold, or (-1,-1) if no crossing is found.
**EXAMPLE**
>>> img = Image("lenna")
>>> pt = img.getThresholdCrossing((10,10),(200,200),threshold=128)
>>> print pt
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
linearr = self.getDiagonalScanlineGrey(pt1,pt2)
ind = 0
crossing = -1
if departurethreshold==1:
while ind < linearr.size-1:
if darktolight:
if linearr[ind] <=threshold and linearr[ind+1] > threshold:
crossing = ind
break
if lighttodark:
if linearr[ind] >= threshold and linearr[ind+1] < threshold:
crossing = ind
break
ind = ind +1
if crossing != -1:
xind = pt1[0] + int(round((pt2[0]-pt1[0])*crossing/linearr.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*crossing/linearr.size))
retVal = (xind,yind)
else:
retVal = (-1,-1)
#print 'Edgepoint not found.'
else:
while ind < linearr.size-(departurethreshold+1):
if darktolight:
if linearr[ind] <=threshold and (linearr[ind+1:ind+1+departurethreshold] > threshold).all():
crossing = ind
break
if lighttodark:
if linearr[ind] >= threshold and (linearr[ind+1:ind+1+departurethreshold] < threshold).all():
crossing = ind
break
ind = ind +1
if crossing != -1:
xind = pt1[0] + int(round((pt2[0]-pt1[0])*crossing/linearr.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*crossing/linearr.size))
retVal = (xind,yind)
else:
retVal = (-1,-1)
#print 'Edgepoint not found.'
return retVal
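# A minimal sketch of the light-to-dark crossing test above, assuming a 1-D numpy
# intensity profile and departurethreshold == 1:
#
#   import numpy as np
#   profile = np.array([200, 190, 150, 90, 60, 40])
#   t = 128
#   idx = np.where((profile[:-1] >= t) & (profile[1:] < t))[0]
#   crossing = idx[0] if len(idx) else -1   # -> 2 for this profile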
def getDiagonalScanlineGrey(self, pt1, pt2):
"""
**SUMMARY**
This function returns a single line of greyscale values from the image.
TODO: speed improvements and RGB tolerance
**PARAMETERS**
* *pt1, pt2* - the starting and ending points in tuple form e.g. (1,2)
**RETURNS**
An array of the pixel values.
**EXAMPLE**
>>> img = Image("lenna")
>>> sl = img.getDiagonalScanlineGrey((100,200),(300,400))
**SEE ALSO**
:py:meth:`getHorzScanlineGray`
:py:meth:`getVertScanlineGray`
:py:meth:`getVertScanline`
"""
if not self.isGray():
self = self.toGray()
#self = self._getGrayscaleBitmap()
width = int(round(math.sqrt(math.pow(pt2[0]-pt1[0],2) + math.pow(pt2[1]-pt1[1],2))))
retVal = np.zeros(width)
for x in range(0, retVal.size):
xind = pt1[0] + int(round((pt2[0]-pt1[0])*x/retVal.size))
yind = pt1[1] + int(round((pt2[1]-pt1[1])*x/retVal.size))
current_pixel = self.getPixel(xind,yind)
retVal[x] = current_pixel[0]
return retVal
def fitLines(self,guesses,window=10,threshold=128):
"""
**SUMMARY**
Fit lines in a binary/gray image using an initial guess and the least squares method.
The lines are returned as a line feature set.
**PARAMETERS**
* *guesses* - A list of tuples of the form ((x0,y0),(x1,y1)) where each of the lines
is an approximate guess.
* *window* - A window around the guess to search.
* *threshold* - the threshold above which we count a pixel as a line
**RETURNS**
A feature set of line features, one per guess.
**EXAMPLE**
>>> img = Image("lsq.png")
>>> guesses = [((313,150),(312,332)),((62,172),(252,52)),((102,372),(182,182)),((372,62),(572,162)),((542,362),(462,182)),((232,412),(462,423))]
>>> l = img.fitLines(guesses,window=10)
>>> l.draw(color=Color.RED,width=3)
>>> for g in guesses:
>>> img.drawLine(g[0],g[1],color=Color.YELLOW)
>>> img.show()
"""
retVal = FeatureSet()
i =0
for g in guesses:
# Guess the size of the crop region from the line guess and the window.
ymin = np.min([g[0][1],g[1][1]])
ymax = np.max([g[0][1],g[1][1]])
xmin = np.min([g[0][0],g[1][0]])
xmax = np.max([g[0][0],g[1][0]])
xminW = np.clip(xmin-window,0,self.width)
xmaxW = np.clip(xmax+window,0,self.width)
yminW = np.clip(ymin-window,0,self.height)
ymaxW = np.clip(ymax+window,0,self.height)
temp = self.crop(xminW,yminW,xmaxW-xminW,ymaxW-yminW)
temp = temp.getGrayNumpy()
# pick the lines above our threshold
x,y = np.where(temp>threshold)
pts = zip(x,y)
gpv = np.array([float(g[0][0]-xminW),float(g[0][1]-yminW)])
gpw = np.array([float(g[1][0]-xminW),float(g[1][1]-yminW)])
def lineSegmentToPoint(p):
w = gpw
v = gpv
#print w,v
p = np.array([float(p[0]),float(p[1])])
l2 = np.sum((w-v)**2)
t = float(np.dot((p-v),(w-v))) / float(l2)
if( t < 0.00 ):
return np.sqrt(np.sum((p-v)**2))
elif(t > 1.0):
return np.sqrt(np.sum((p-w)**2))
else:
project = v + (t*(w-v))
return np.sqrt(np.sum((p-project)**2))
# http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
distances = np.array(map(lineSegmentToPoint,pts))
closepoints = np.where(distances<window)[0]
pts = np.array(pts)
if( len(closepoints) < 3 ):
continue
good_pts = pts[closepoints]
good_pts = good_pts.astype(float)
x = good_pts[:,0]
y = good_pts[:,1]
# do the shift from our crop
# generate the line values
x = x + xminW
y = y + yminW
ymin = np.min(y)
ymax = np.max(y)
xmax = np.max(x)
xmin = np.min(x)
if( (xmax-xmin) > (ymax-ymin) ):
# do the least squares
A = np.vstack([x,np.ones(len(x))]).T
m,c = nla.lstsq(A,y)[0]
y0 = int(m*xmin+c)
y1 = int(m*xmax+c)
retVal.append(Line(self,((xmin,y0),(xmax,y1))))
else:
# do the least squares
A = np.vstack([y,np.ones(len(y))]).T
m,c = nla.lstsq(A,x)[0]
x0 = int(ymin*m+c)
x1 = int(ymax*m+c)
retVal.append(Line(self,((x0,ymin),(x1,ymax))))
return retVal
def fitLinePoints(self,guesses,window=(11,11), samples=20,params=(0.1,0.1,0.1)):
"""
**DESCRIPTION**
This method uses the snakes / active contour approach in an attempt to
fit a series of points to a line that may or may not be exactly linear.
**PARAMETERS**
* *guesses* - A set of lines that we wish to fit to. The lines are specified
as a list of tuples of (x,y) tuples. E.g. [((x0,y0),(x1,y1))....]
* *window* - The search window in pixels for the active contours approach.
* *samples* - The number of points to sample along the input line,
these are the initial conditions for active contours method.
* *params* - the alpha, beta, and gamma values for the active contours routine.
**RETURNS**
A list of fitted contour points. Each contour is a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lsq.png")
>>> guesses = [((313,150),(312,332)),((62,172),(252,52)),((102,372),(182,182)),((372,62),(572,162)),((542,362),(462,182)),((232,412),(462,423))]
>>> r = img.fitLinePoints(guesses)
>>> for rr in r:
>>> img.drawLine(rr[0],rr[1],color=Color.RED,width=3)
>>> for g in guesses:
>>> img.drawLine(g[0],g[1],color=Color.YELLOW)
>>> img.show()
"""
pts = []
for g in guesses:
#generate the approximation
bestGuess = []
dx = float(g[1][0]-g[0][0])
dy = float(g[1][1]-g[0][1])
l = np.sqrt((dx*dx)+(dy*dy))
if( l <= 0 ):
logger.warning("fitLinePoints: zero-length line guess, cannot fit points.")
return
dx = dx/l
dy = dy/l
for i in range(-1,samples+1):
t = i*(l/samples)
bestGuess.append((int(g[0][0]+(t*dx)),int(g[0][1]+(t*dy))))
# do the snake fitting
appx = self.fitContour(bestGuess,window=window,params=params,doAppx=False)
pts.append(appx)
return pts
def drawPoints(self, pts, color=Color.RED, sz=3, width=-1):
"""
**DESCRIPTION**
A quick and dirty points rendering routine.
**PARAMETERS**
* *pts* - pts a list of (x,y) points.
* *color* - a color for our points.
* *sz* - the circle radius for our points.
* *width* - if -1 fill the point, otherwise the size of point border
**RETURNS**
None - This is an inplace operation.
**EXAMPLE**
>>> img = Image("lenna")
>>> img.drawPoints([(10,10),(30,30)])
>>> img.show()
"""
for p in pts:
self.drawCircle(p,sz,color,width)
return None
def sobel(self, xorder=1, yorder=1, doGray=True, aperture=5, aperature=None):
"""
**DESCRIPTION**
Sobel operator for edge detection
**PARAMETERS**
* *xorder* - int - Order of the derivative x.
* *yorder* - int - Order of the derivative y.
* *doGray* - Bool - grayscale or not.
* *aperture* - int - Size of the extended Sobel kernel. It must be 1, 3, 5, or 7.
**RETURNS**
Image with the Sobel operator applied to it
**EXAMPLE**
>>> img = Image("lenna")
>>> s = img.sobel()
>>> s.show()
"""
aperture = aperature if aperature else aperture
retVal = None
try:
import cv2
except:
logger.warning("Can't do Sobel without OpenCV >= 2.3.0")
return None
if( aperture != 1 and aperture != 3 and aperture != 5 and aperture != 7 ):
logger.warning("Bad Sobel Aperture, values are [1,3,5,7].")
return None
if( doGray ):
dst = cv2.Sobel(self.getGrayNumpy(),cv2.cv.CV_32F,xorder,yorder,ksize=aperture)
minv = np.min(dst)
maxv = np.max(dst)
cscale = 255/(maxv-minv)
shift = -1*(minv)
t = np.zeros(self.size(),dtype='uint8')
t = cv2.convertScaleAbs(dst,t,cscale,shift/255.0)
retVal = Image(t)
else:
layers = self.splitChannels(grayscale=False)
sobel_layers = []
for layer in layers:
dst = cv2.Sobel(layer.getGrayNumpy(),cv2.cv.CV_32F,xorder,yorder,ksize=aperture)
minv = np.min(dst)
maxv = np.max(dst)
cscale = 255/(maxv-minv)
shift = -1*(minv)
t = np.zeros(self.size(),dtype='uint8')
t = cv2.convertScaleAbs(dst,t,cscale,shift/255.0)
sobel_layers.append(Image(t))
b,g,r = sobel_layers
retVal = self.mergeChannels(b,g,r)
return retVal
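# A minimal sketch of what the cscale/shift normalization above is intended to do,
# assuming a plain numpy array of signed Sobel responses:
#
#   import numpy as np
#   dst = np.array([[-3.0, 0.0], [1.5, 3.0]])
#   minv, maxv = dst.min(), dst.max()
#   scaled = ((dst - minv) * (255.0 / (maxv - minv))).astype(np.uint8)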
def track(self, method="CAMShift", ts=None, img=None, bb=None, **kwargs):
"""
**DESCRIPTION**
Tracking the object surrounded by the bounding box in the given
image or TrackSet.
**PARAMETERS**
* *method* - str - The Tracking Algorithm to be applied
* *ts* - TrackSet - SimpleCV.Features.TrackSet.
* *img* - Image - Image to be tracked or list - List of Images to be tracked.
* *bb* - tuple - Bounding Box tuple (x, y, w, h)
**Optional Parameters**
*CAMShift*
CAMShift Tracker is based on mean shift thresholding algorithm which is
combined with an adaptive region-sizing step. Histogram is calculated based
on the mask provided. If mask is not provided, hsv transformed image of the
provided image is thresholded using inRange function (band thresholding).
lower HSV and upper HSV values are used inRange function. If the user doesn't
provide any range values, default range values are used.
Histogram is back projected using previous images to get an appropriate image
and it passed to camshift function to find the object in the image. Users can
decide the number of images to be used in back projection by providing num_frames.
lower - Lower HSV value for inRange thresholding. tuple of (H, S, V). Default : (0, 60, 32)
upper - Upper HSV value for inRange thresholding. tuple of (H, S, V). Default: (180, 255, 255)
mask - Mask to calculate Histogram. It's better if you don't provide one. Default: calculated using above thresholding ranges.
num_frames - number of frames to be backtracked. Default: 40
*LK*
LK Tracker is based on Optical Flow method. In brief, optical flow can be
defined as the apparent motion of objects caused by the relative motion between
an observer and the scene. (Wikipedia).
LK Tracker first finds some good feature points in the given bounding box in the image.
These are the tracker points. In consecutive frames, optical flow of these feature points
is calculated. Users can limit the number of feature points by providing maxCorners and
qualityLevel. number of features will always be less than maxCorners. These feature points
are calculated using Harris Corner detector. It returns a matrix with each pixel having
some quality value. Only good features are used based upon the qualityLevel provided. better
features have better quality measure and hence are more suitable to track.
Users can set minimum distance between each features by providing minDistance.
LK tracker finds optical flow using a number of pyramids and users can set this number by
providing maxLevel and users can set size of the search window for Optical Flow by setting
winSize.
docs from http://docs.opencv.org/
maxCorners - Maximum number of corners to return in goodFeaturesToTrack. If there are more corners than are found, the strongest of them is returned. Default: 4000
qualityLevel - Parameter characterizing the minimal accepted quality of image corners. The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue or the Harris function response. The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure less than 15 are rejected. Default: 0.08
minDistance - Minimum possible Euclidean distance between the returned corners. Default: 2
blockSize - Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. Default: 3
winSize - size of the search window at each pyramid level. Default: (10, 10)
maxLevel - 0-based maximal pyramid level number; if set to 0, pyramids are not used (single level); if set to 1, two levels are used, and so on. Default: 10
*SURF*
SURF based tracker finds keypoints in the template and computes the descriptor. The template is
chosen based on the bounding box provided with the first image. The image is cropped and stored
as template. SURF keypoints are found and descriptor is computed for the template and stored.
SURF keypoints are found in the image and its descriptor is computed. Image keypoints and template
keypoints are matched using K-nearest neighbor algorithm. Matched keypoints are filtered according
to the knn distance of the points. Users can set this criteria by setting distance.
Density Based Clustering algorithm (DBSCAN) is applied on the matched keypoints to filter out points
that are in background. DBSCAN creates a cluster of object points and background points. These background
points are discarded. Users can set certain parameters for DBSCAN which are listed below.
K-means is applied on matched KeyPoints with k=1 to find the center of the cluster and then bounding
box is predicted based upon the position of all the object KeyPoints.
eps_val - eps for DBSCAN. The maximum distance between two samples for them to be considered as in the same neighborhood. default: 0.69
min_samples - min number of samples in DBSCAN. The number of samples in a neighborhood for a point to be considered as a core point. default: 5
distance - thresholding KNN distance of each feature. if KNN distance > distance, point is discarded. default: 100
*MFTrack*
Median Flow tracker is similar to LK tracker (based on Optical Flow), but it's more advanced, better and
faster.
In MFTrack, tracking points are decided based upon the number of horizontal and vertical points and window
size provided by the user. Unlike LK Tracker, good features are not found which saves a huge amount of time.
feature points are selected symmetrically in the bounding box.
Total number of feature points to be tracked = numM * numN.
If the width and height of bounding box is 200 and 100 respectively, and numM = 10 and numN = 10,
there will be 10 points in the bounding box equally placed(10 points in 200 pixels) in each row. and 10 equally placed
points (10 points in 100 pixels) in each column. So total number of tracking points = 100.
numM > 0
numN > 0 (both may not be equal)
users can provide a margin around the bounding box that will be considered to place feature points and
calculate optical flow.
Optical flow is calculated from frame1 to frame2 and from frame2 to frame1. There might be some points
which give inaccurate optical flow, to eliminate these points the above method is used. It is called
forward-backward error tracking. Optical Flow search window size can be set using winsize_lk.
For each point, comparison is done based on the square region around it.
The length of the square window can be set using winsize.
numM - Number of points to be tracked in the bounding box
in height direction.
default: 10
numN - Number of points to be tracked in the bounding box
in width direction.
default: 10
margin - Margin around the bounding box.
default: 5
winsize_lk - Optical Flow search window size.
default: 4
winsize - Size of quadratic area around the point which is compared.
default: 10
Available Tracking Methods
- CamShift
- LK
- SURF
- MFTrack
**RETURNS**
SimpleCV.Features.TrackSet
Returns a TrackSet with all the necessary attributes.
**HOW TO**
>>> ts = img.track("camshift", img=img1, bb=bb)
Here TrackSet is returned. All the necessary attributes will be included in the trackset.
After getting the trackset you need not provide the bounding box or image. You provide TrackSet as parameter to track().
Bounding box and image will be taken from the trackset.
So. now
>>> ts = new_img.track("camshift",ts)
The new Tracking feature will be appended to the given trackset and that will be returned.
So, to use it in loop::
img = cam.getImage()
bb = (img.width/4,img.height/4,img.width/4,img.height/4)
ts = img.track(img=img, bb=bb)
while (True):
img = cam.getImage()
ts = img.track("camshift", ts=ts)
ts = []
while (some_condition_here):
img = cam.getImage()
ts = img.track("camshift",ts,img0,bb)
now here in first loop iteration since ts is empty, img0 and bb will be considered.
New tracking object will be created and added in ts (TrackSet)
After first iteration, ts is not empty and hence the previous
image frames and bounding box will be taken from ts and img0
and bb will be ignored.
# Instead of loop, give a list of images to be tracked.
ts = []
imgs = [img1, img2, img3, ..., imgN]
ts = img0.track("camshift", ts, imgs, bb)
ts.drawPath()
ts[-1].image.show()
Using Optional Parameters:
for CAMShift
>>> ts = []
>>> ts = img.track("camshift", ts, img1, bb, lower=(40, 100, 100), upper=(100, 250, 250))
You can provide some/all/None of the optional parameters listed for CAMShift.
for LK
>>> ts = []
>>> ts = img.track("lk", ts, img1, bb, maxCorners=4000, qualityLevel=0.5, minDistance=3)
You can provide some/all/None of the optional parameters listed for LK.
for SURF
>>> ts = []
>>> ts = img.track("surf", ts, img1, bb, eps_val=0.7, min_samples=8, distance=200)
You can provide some/all/None of the optional parameters listed for SURF.
for MFTrack
>>> ts = []
>>> ts = img.track("mftrack", ts, img1, bb, numM=12, numN=12, winsize=15)
You can provide some/all/None of the optional parameters listed for MFTrack.
Check out Tracking examples provided in the SimpleCV source code.
READ MORE:
CAMShift Tracker:
Uses meanshift based CAMShift thresholding technique. Blobs and objects with
single tone are tracked very efficiently. CAMShift should be preferred if you
are trying to track faces. It is optimized to track faces.
LK (Lucas Kanade) Tracker:
It is based on LK Optical Flow. It calculates Optical flow in frame1 to frame2
and also in frame2 to frame1 and using back track error, filters out false
positives.
SURF based Tracker:
Matches keypoints from the template image and the current frame.
flann based matcher is used to match the keypoints.
Density based clustering is used classify points as in-region (of bounding box)
and out-region points. Using in-region points, new bounding box is predicted using
k-means.
Median Flow Tracker:
Median Flow Tracker is the base tracker that is used in OpenTLD. It is based on
Optical Flow. It calculates optical flow of the points in the bounding box from
frame 1 to frame 2 and from frame 2 to frame 1 and using back track error, removes
false positives. As the name suggests, it takes the median of the flow, and eliminates
points.
"""
if not ts and not img:
print "Invalid Input. Must provide FeatureSet or Image"
return None
if not ts and not bb:
print "Invalid Input. Must provide Bounding Box with Image"
return None
if not ts:
ts = TrackSet()
else:
img = ts[-1].image
bb = ts[-1].bb
try:
import cv2
except ImportError:
print "Tracking is available for OpenCV >= 2.3"
return None
if type(img) == list:
ts = self.track(method, ts, img[0], bb, **kwargs)
for i in img:
ts = i.track(method, ts, **kwargs)
return ts
# Issue #256 - (Bug) Memory management issue due to too many number of images.
nframes = 300
if 'nframes' in kwargs:
nframes = kwargs['nframes']
if len(ts) > nframes:
ts.trimList(50)
if method.lower() == "camshift":
track = camshiftTracker(self, bb, ts, **kwargs)
ts.append(track)
elif method.lower() == "lk":
track = lkTracker(self, bb, ts, img, **kwargs)
ts.append(track)
elif method.lower() == "surf":
try:
from scipy.spatial import distance as Dis
from sklearn.cluster import DBSCAN
except ImportError:
logger.warning("sklearn required")
return None
if not hasattr(cv2, "FeatureDetector_create"):
warnings.warn("OpenCV >= 2.4.3 required. Returning None.")
return None
track = surfTracker(self, bb, ts, **kwargs)
ts.append(track)
elif method.lower() == "mftrack":
track = mfTracker(self, bb, ts, img, **kwargs)
ts.append(track)
return ts
def _to32F(self):
"""
**SUMMARY**
Convert this image to a 32bit floating point image.
"""
retVal = cv.CreateImage((self.width,self.height), cv.IPL_DEPTH_32F, 3)
cv.Convert(self.getBitmap(),retVal)
return retVal
def __getstate__(self):
return dict( size = self.size(), colorspace = self._colorSpace, image = self.applyLayers().getBitmap().tostring() )
def __setstate__(self, mydict):
self._bitmap = cv.CreateImageHeader(mydict['size'], cv.IPL_DEPTH_8U, 3)
cv.SetData(self._bitmap, mydict['image'])
self._colorSpace = mydict['colorspace']
self.width = mydict['size'][0]
self.height = mydict['size'][1]
def area(self):
'''
Returns the area of the Image.
'''
return self.width * self.height
def _get_header_anim(self):
""" Animation header. To replace the getheader()[0] """
bb = "GIF89a"
bb += int_to_bin(self.size()[0])
bb += int_to_bin(self.size()[1])
bb += "\x87\x00\x00"
return bb
def rotate270(self):
"""
**DESCRIPTION**
Rotate the image 270 degrees to the left, the same as 90 degrees to the right.
This is the same as rotateRight()
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate270().show()
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
cv.Flip(retVal, retVal, 1)
return(Image(retVal, colorSpace=self._colorSpace))
def rotate90(self):
"""
**DESCRIPTION**
Rotate the image 90 degrees to the left, the same as 270 degrees to the right.
This is the same as rotateLeft()
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate90().show()
"""
retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)
cv.Transpose(self.getBitmap(), retVal)
cv.Flip(retVal, retVal, 0) # vertical
return(Image(retVal, colorSpace=self._colorSpace))
def rotateLeft(self): # same as 90
"""
**DESCRIPTION**
Rotate the image 90 degrees to the left.
This is the same as rotate 90.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotateLeft().show()
"""
return self.rotate90()
def rotateRight(self): # same as 270
"""
**DESCRIPTION**
Rotate the image 90 degrees to the right.
This is the same as rotate 270.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotateRight().show()
"""
return self.rotate270()
def rotate180(self):
"""
**DESCRIPTION**
Rotate the image 180 degrees to the left/right.
This is the same as flipping the image both vertically and horizontally.
**RETURNS**
A SimpleCV image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img.rotate180().show()
"""
retVal = cv.CreateImage((self.width, self.height), cv.IPL_DEPTH_8U, 3)
cv.Flip(self.getBitmap(), retVal, 0) #vertical
cv.Flip(retVal, retVal, 1)#horizontal
return(Image(retVal, colorSpace=self._colorSpace))
def verticalHistogram(self, bins=10, threshold=128,normalize=False,forPlot=False):
"""
**DESCRIPTION**
This method generates histogram of the number of grayscale pixels
greater than the provided threshold. The method divides the image
into a number evenly spaced vertical bins and then counts the number
of pixels where the pixel is greater than the threshold. This method
is helpful for doing basic morphological analysis.
**PARAMETERS**
* *bins* - The number of bins to use.
* *threshold* - The grayscale threshold. We count pixels greater than this value.
* *normalize* - If normalize is true we normalize the bin counts to sum to one. Otherwise we return the number of pixels.
* *forPlot* - If this is true we return the bin indices, the bin counts, and the bin widths as a tuple. We can use these values in pyplot.bar to quickly plot the histogram.
**RETURNS**
The default settings return the raw bin counts moving from left to
right on the image. If forPlot is true we return a tuple that
contains a list of bin labels, the bin counts, and the bin widths.
This tuple can be used to plot the histogram using
matplotlib.pyplot.bar function.
**EXAMPLE**
>>> import matplotlib.pyplot as plt
>>> img = Image('lenna')
>>> plt.bar(*img.verticalHistogram(threshold=128,bins=10,normalize=False,forPlot=True),color='y')
>>> plt.show()
**NOTES**
See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
See: http://matplotlib.org/api/pyplot_api.html?highlight=hist#matplotlib.pyplot.hist
"""
if( bins <= 0 ):
raise Exception("Not enough bins")
img = self.getGrayNumpy()
pts = np.where(img>threshold)
y = pts[1]
hist = np.histogram(y,bins=bins,range=(0,self.height),normed=normalize)
retVal = None
if( forPlot ):
# for using matplotlib bar command
# bin labels, bin values, bin width
retVal=(hist[1][0:-1],hist[0],self.height/bins)
else:
retVal = hist[0]
return retVal
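# A minimal sketch of the binning above, assuming a standard row-major HxW numpy array
# (which may differ from SimpleCV's internal buffer layout); it counts above-threshold
# pixels per vertical strip:
#
#   import numpy as np
#   gray = np.random.randint(0, 256, (120, 80))    # toy grayscale image
#   cols = np.where(gray > 128)[1]                 # column index of each hit
#   counts, edges = np.histogram(cols, bins=10, range=(0, gray.shape[1]))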
def horizontalHistogram(self, bins=10, threshold=128,normalize=False,forPlot=False):
"""
**DESCRIPTION**
This method generates histogram of the number of grayscale pixels
greater than the provided threshold. The method divides the image
into a number evenly spaced horizontal bins and then counts the number
of pixels where the pixel is greater than the threshold. This method
is helpful for doing basic morphological analysis.
**PARAMETERS**
* *bins* - The number of bins to use.
* *threshold* - The grayscale threshold. We count pixels greater than this value.
* *normalize* - If normalize is true we normalize the bin counts to sum to one. Otherwise we return the number of pixels.
* *forPlot* - If this is true we return the bin indices, the bin counts, and the bin widths as a tuple. We can use these values in pyplot.bar to quickly plot the histogram.
**RETURNS**
The default settings return the raw bin counts moving from top to
bottom on the image. If forPlot is true we return a tuple that
contains a list of bin labels, the bin counts, and the bin widths.
This tuple can be used to plot the histogram using
matplotlib.pyplot.bar function.
**EXAMPLE**
>>> import matplotlib.pyplot as plt
>>> img = Image('lenna')
>>> plt.bar(*img.horizontalHistogram(threshold=128,bins=10,normalize=False,forPlot=True),color='y')
>>> plt.show()
**NOTES**
See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html
See: http://matplotlib.org/api/pyplot_api.html?highlight=hist#matplotlib.pyplot.hist
"""
if( bins <= 0 ):
raise Exception("Not enough bins")
img = self.getGrayNumpy()
pts = np.where(img>threshold)
x = pts[0]
hist = np.histogram(x,bins=bins,range=(0,self.width),normed=normalize)
retVal = None
if( forPlot ):
# for using matplotlib bar command
# bin labels, bin values, bin width
retVal=(hist[1][0:-1],hist[0],self.width/bins)
else:
retVal = hist[0]
return retVal
def getLineScan(self,x=None,y=None,pt1=None,pt2=None,channel = -1):
"""
**SUMMARY**
This function takes in a channel of an image or grayscale by default
and then pulls out a series of pixel values as a linescan object
than can be manipulated further.
**PARAMETERS**
* *x* - Take a vertical line scan at the column x.
* *y* - Take a horizontal line scan at the row y.
* *pt1* - The first point of a line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.LineScan object or None if the method fails.
**EXAMPLE**
>>>> import matplotlib.pyplot as plt
>>>> img = Image('lenna')
>>>> a = img.getLineScan(x=10)
>>>> b = img.getLineScan(y=10)
>>>> c = img.getLineScan(pt1 = (10,10), pt2 = (500,500) )
>>>> plt.plot(a)
>>>> plt.plot(b)
>>>> plt.plot(c)
>>>> plt.show()
"""
if channel == -1:
img = self.getGrayNumpy()
else:
try:
img = self.getNumpy()[:,:,channel]
except IndexError:
print 'Channel missing!'
return None
retVal = None
if( x is not None and y is None and pt1 is None and pt2 is None):
if( x >= 0 and x < self.width):
retVal = LineScan(img[x,:])
retVal.image = self
retVal.pt1 = (x,0)
retVal.pt2 = (x,self.height)
retVal.col = x
x = np.ones((1,self.height))[0]*x
y = range(0,self.height,1)
pts = zip(x,y)
retVal.pointLoc = pts
else:
warnings.warn("ImageClass.getLineScan - that is not valid scanline.")
return None
elif( x is None and y is not None and pt1 is None and pt2 is None):
if( y >= 0 and y < self.height):
retVal = LineScan(img[:,y])
retVal.image = self
retVal.pt1 = (0,y)
retVal.pt2 = (self.width,y)
retVal.row = y
y = np.ones((1,self.width))[0]*y
x = range(0,self.width,1)
pts = zip(x,y)
retVal.pointLoc = pts
else:
warnings.warn("ImageClass.getLineScan - that is not valid scanline.")
return None
pass
elif( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 and
x is None and y is None):
pts = self.bresenham_line(pt1,pt2)
retVal = LineScan([img[p[0],p[1]] for p in pts])
retVal.pointLoc = pts
retVal.image = self
retVal.pt1 = pt1
retVal.pt2 = pt2
else:
# an invalid combination - warn
warnings.warn("ImageClass.getLineScan - that is not valid scanline.")
return None
retVal.channel = channel
return retVal
def setLineScan(self, linescan,x=None,y=None,pt1=None,pt2=None,channel = -1):
"""
**SUMMARY**
This function helps you put back the linescan in the image.
**PARAMETERS**
* *linescan* - LineScan object
* *x* - put line scan at the column x.
* *y* - put line scan at the row y.
* *pt1* - The first point of a line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.Image
**EXAMPLE**
>>> img = Image('lenna')
>>> a = img.getLineScan(x=10)
>>> for index in range(len(a)):
... a[index] = 0
>>> newimg = img.setLineScan(a, x=50)
>>> newimg.show()
# This will show you a black line in column 50.
"""
#retVal = self.toGray()
if channel == -1:
img = np.copy(self.getGrayNumpy())
else:
try:
img = np.copy(self.getNumpy()[:,:,channel])
except IndexError:
print 'Channel missing!'
return None
if( x is None and y is None and pt1 is None and pt2 is None):
if(linescan.pt1 is None or linescan.pt2 is None):
warnings.warn("ImageClass.setLineScan: No coordinates to re-insert linescan.")
return None
else:
pt1 = linescan.pt1
pt2 = linescan.pt2
if( pt1[0] == pt2[0] and np.abs(pt1[1]-pt2[1])==self.height):
x = pt1[0] # vertical line
pt1=None
pt2=None
elif( pt1[1] == pt2[1] and np.abs(pt1[0]-pt2[0])==self.width):
y = pt1[1] # horizontal line
pt1=None
pt2=None
retVal = None
if( x is not None and y is None and pt1 is None and pt2 is None):
if( x >= 0 and x < self.width):
if( len(linescan) != self.height ):
linescan = linescan.resample(self.height)
#check for number of points
#linescan = np.array(linescan)
img[x,:] = np.clip(linescan[:], 0, 255)
else:
warnings.warn("ImageClass.setLineScan: No coordinates to re-insert linescan.")
return None
elif( x is None and y is not None and pt1 is None and pt2 is None):
if( y >= 0 and y < self.height):
if( len(linescan) != self.width ):
linescan = linescan.resample(self.width)
#check for number of points
#linescan = np.array(linescan)
img[:,y] = np.clip(linescan[:], 0, 255)
else:
warnings.warn("ImageClass.setLineScan: No coordinates to re-insert linescan.")
return None
elif( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 and
x is None and y is None):
pts = self.bresenham_line(pt1,pt2)
if( len(linescan) != len(pts) ):
linescan = linescan.resample(len(pts))
#linescan = np.array(linescan)
linescan = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0],pt[1]]=linescan[idx]
idx = idx+1
else:
warnings.warn("ImageClass.setLineScan: No coordinates to re-insert linescan.")
return None
if channel == -1:
retVal = Image(img)
else:
temp = np.copy(self.getNumpy())
temp[:,:,channel] = img
retVal = Image(temp)
return retVal
def replaceLineScan(self, linescan, x=None, y=None, pt1=None, pt2=None, channel = None):
"""
**SUMMARY**
This function easily lets you replace the linescan in the image.
Once you get the LineScan object, you might want to edit it. Perform
some task, apply some filter etc and now you want to put it back where
you took it from. By using this function, it is not necessary to specify
where to put the data. It will automatically replace where you took the
LineScan from.
**PARAMETERS**
* *linescan* - LineScan object
* *x* - put line scan at the column x.
* *y* - put line scan at the row y.
* *pt1* - The first point of a line scan between two points; the line scan values always go in the +x direction.
* *pt2* - Second parameter for a non-vertical or horizontal line scan.
* *channel* - To select a channel. eg: selecting a channel RED,GREEN or BLUE. If set to -1 it operates with gray scale values
**RETURNS**
A SimpleCV.Image
**EXAMPLE**
>>> img = Image('lenna')
>>> a = img.getLineScan(x=10)
>>> for index in range(len(a)):
... a[index] = 0
>>> newimg = img.replaceLineScan(a)
>>> newimg.show()
# This will show you a black line in column 10.
"""
if x is None and y is None and pt1 is None and pt2 is None and channel is None:
if linescan.channel == -1:
img = np.copy(self.getGrayNumpy())
else:
try:
img = np.copy(self.getNumpy()[:,:,linescan.channel])
except IndexError:
print 'Channel missing!'
return None
if linescan.row is not None:
if len(linescan) == self.width:
ls = np.clip(linescan, 0, 255)
img[:,linescan.row] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.col is not None:
if len(linescan) == self.height:
ls = np.clip(linescan, 0, 255)
img[linescan.col,:] = ls[:]
else:
warnings.warn("LineScan Size and Image size do not match")
return None
elif linescan.pt1 and linescan.pt2:
pts = self.bresenham_line(linescan.pt1, linescan.pt2)
if( len(linescan) != len(pts) ):
linescan = linescan.resample(len(pts))
ls = np.clip(linescan[:], 0, 255)
idx = 0
for pt in pts:
img[pt[0],pt[1]]=ls[idx]
idx = idx+1
if linescan.channel == -1:
retVal = Image(img)
else:
temp = np.copy(self.getNumpy())
temp[:,:,linescan.channel] = img
retVal = Image(temp)
else:
if channel is None:
retVal = self.setLineScan(linescan , x, y, pt1, pt2, linescan.channel)
else:
retVal = self.setLineScan(linescan , x, y, pt1, pt2, channel)
return retVal
def getPixelsOnLine(self,pt1,pt2):
"""
**SUMMARY**
Return all of the pixels on an arbitrary line.
**PARAMETERS**
* *pt1* - The first pixel coordinate as an (x,y) tuple or list.
* *pt2* - The second pixel coordinate as an (x,y) tuple or list.
**RETURNS**
Returns a list of RGB pixels values.
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.getPixelsOnLine( (0,0), (img.width/2,img.height/2) )
"""
retVal = None
if( (isinstance(pt1,tuple) or isinstance(pt1,list)) and
(isinstance(pt2,tuple) or isinstance(pt2,list)) and
len(pt1) == 2 and len(pt2) == 2 ):
pts = self.bresenham_line(pt1,pt2)
retVal = [self.getPixel(p[0],p[1]) for p in pts]
else:
warnings.warn("ImageClass.getPixelsOnLine - The line you provided is not valid")
return retVal
def bresenham_line(self, (x,y), (x2,y2)):
"""
Bresenham line algorithm
cribbed from: http://snipplr.com/view.php?codeview&id=22482
This is just a helper method
"""
if (not 0 <= x <= self.width-1 or not 0 <= y <= self.height-1 or
not 0 <= x2 <= self.width-1 or not 0 <= y2 <= self.height-1):
l = Line(self, ((x, y), (x2, y2))).cropToImageEdges()
if l:
ep = list(l.end_points)
ep.sort()
x, y = ep[0]
x2, y2 = ep[1]
else:
return []
steep = 0
coords = []
dx = abs(x2 - x)
if (x2 - x) > 0:
sx = 1
else:
sx = -1
dy = abs(y2 - y)
if (y2 - y) > 0:
sy = 1
else:
sy = -1
if dy > dx:
steep = 1
x,y = y,x
dx,dy = dy,dx
sx,sy = sy,sx
d = (2 * dy) - dx
for i in range(0,dx):
if steep:
coords.append((y,x))
else:
coords.append((x,y))
while d >= 0:
y = y + sy
d = d - (2 * dx)
x = x + sx
d = d + (2 * dy)
coords.append((x2,y2))
return coords
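# Worked example (assuming both endpoints lie inside the image): tracing the loop above
# from (0,0) to (4,2) yields
#   >>> img.bresenham_line((0,0), (4,2))
#   [(0, 0), (1, 1), (2, 1), (3, 2), (4, 2)]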
def uncrop(self, ListofPts): #(x,y),(x2,y2)):
"""
**SUMMARY**
This function allows us to translate a set of points from the crop window back to the coordinate of the source window.
**PARAMETERS**
* *ListofPts* - set of points from cropped image.
**RETURNS**
Returns a list of coordinates in the source image.
**EXAMPLE**
>>> img = Image('lenna')
>>> croppedImg = img.crop(10,20,250,500)
>>> sourcePts = croppedImg.uncrop([(2,3),(56,23),(24,87)])
"""
return [(i[0]+self._uncroppedX,i[1]+self._uncroppedY)for i in ListofPts]
def grid(self,dimensions=(10,10), color=(0, 0, 0), width=1, antialias=True, alpha=-1):
"""
**SUMMARY**
Draw a grid on the image
**PARAMETERS**
* *dimensions* - Number of rows and cols as a (rows,cols) tuple or list.
* *color* - Grid's color as a tuple or list.
* *width* - The grid line width in pixels.
* *antialias* - Draw an antialiased object
* *alpha* - The alpha blending for the object. If this value is -1 then the
layer default value is used. A value of 255 means opaque, while 0 means transparent.
**RETURNS**
Returns a copy of the image with the grid drawn on a new drawing layer.
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> img.grid((20,20),(255,0,0),1,True,0)
"""
retVal = self.copy()
try:
step_row = self.size()[1]/dimensions[0]
step_col = self.size()[0]/dimensions[1]
except ZeroDivisionError:
return retVal
i = 1
j = 1
grid = DrawingLayer(self.size()) #add a new layer for grid
while( (i < dimensions[0]) and (j < dimensions[1]) ):
if( i < dimensions[0] ):
grid.line((0,step_row*i), (self.size()[0],step_row*i), color, width, antialias, alpha)
i = i + 1
if( j < dimensions[1] ):
grid.line((step_col*j,0), (step_col*j,self.size()[1]), color, width, antialias, alpha)
j = j + 1
retVal._gridLayer[0] = retVal.addDrawingLayer(grid) # store grid layer index
retVal._gridLayer[1] = dimensions
return retVal
def removeGrid(self):
"""
**SUMMARY**
Remove Grid Layer from the Image.
**PARAMETERS**
None
**RETURNS**
Drawing Layer corresponding to the Grid Layer
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> gridLayer = img.removeGrid()
"""
if self._gridLayer[0] is not None:
grid = self.removeDrawingLayer(self._gridLayer[0])
self._gridLayer=[None,[0, 0]]
return grid
else:
return None
def findGridLines(self):
"""
**SUMMARY**
Return Grid Lines as a Line Feature Set
**PARAMETERS**
None
**RETURNS**
Grid Lines as a Feature Set
**EXAMPLE**
>>>> img = Image('something.png')
>>>> img.grid([20,20],(255,0,0))
>>>> lines = img.findGridLines()
"""
gridIndex = self.getDrawingLayer(self._gridLayer[0])
if self._gridLayer[0]==-1:
print "Cannot find grid on the image, Try adding a grid first"
return None
lineFS = FeatureSet()
try:
step_row = self.size()[1]/self._gridLayer[1][0]
step_col = self.size()[0]/self._gridLayer[1][1]
except ZeroDivisionError:
return None
i = 1
j = 1
while( i < self._gridLayer[1][0] ):
lineFS.append(Line(self,((0,step_row*i), (self.size()[0],step_row*i))))
i = i + 1
while( j < self._gridLayer[1][1] ):
lineFS.append(Line(self,((step_col*j,0), (step_col*j,self.size()[1]))))
j = j + 1
return lineFS
def logicalAND(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise AND operation on images
**PARAMETERS**
* *img* - the image to AND with.
* *grayscale* - if True, the operation is performed on grayscale images; otherwise on each color channel.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalAND(img1, grayscale=False)
>>> img.logicalAND(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_and(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def logicalNAND(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise NAND operation on images
**PARAMETERS**
* *img* - the image to NAND with.
* *grayscale* - if True, the operation is performed on grayscale images; otherwise on each color channel.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalNAND(img1, grayscale=False)
>>> img.logicalNAND(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
except ImportError:
print "This function is available for OpenCV >= 2.3"
return None
if grayscale:
retval = cv2.bitwise_and(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_and(self.getNumpyCv2(), img.getNumpyCv2())
retval = cv2.bitwise_not(retval)
return Image(retval, cv2image=True)
def logicalOR(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise OR operation on images
**PARAMETERS**
* *img* - the image to OR with.
* *grayscale* - if True, the operation is performed on grayscale images; otherwise on each color channel.
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalOR(img1, grayscale=False)
>>> img.logicalOR(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
        except ImportError:
            print "This function is available for OpenCV >= 2.3"
            return None
if grayscale:
retval = cv2.bitwise_or(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_or(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def logicalXOR(self, img, grayscale=True):
"""
**SUMMARY**
Perform bitwise XOR operation on images
**PARAMETERS**
img - the bitwise operation to be performed with
grayscale
**RETURNS**
SimpleCV.ImageClass.Image
**EXAMPLE**
>>> img = Image("something.png")
>>> img1 = Image("something_else.png")
>>> img.logicalXOR(img1, grayscale=False)
>>> img.logicalXOR(img1)
"""
if not self.size() == img.size():
print "Both images must have same sizes"
return None
try:
import cv2
        except ImportError:
            print "This function is available for OpenCV >= 2.3"
            return None
if grayscale:
retval = cv2.bitwise_xor(self.getGrayNumpyCv2(), img.getGrayNumpyCv2())
else:
retval = cv2.bitwise_xor(self.getNumpyCv2(), img.getNumpyCv2())
return Image(retval, cv2image=True)
def matchSIFTKeyPoints(self, template, quality=200):
"""
**SUMMARY**
        matchSIFTKeyPoints allows you to match a template image with another image using
        SIFT keypoints. The method extracts keypoints from each image, uses the Fast Library
        for Approximate Nearest Neighbors (FLANN) algorithm to find correspondences between the
        feature points, and filters the correspondences based on quality.
        This method should be able to handle reasonable changes in camera orientation and
illumination. Using a template that is close to the target image will yield much
better results.
**PARAMETERS**
* *template* - A template image.
* *quality* - The feature quality metric. This can be any value between about 100 and 500. Lower
values should return fewer, but higher quality features.
**RETURNS**
A Tuple of lists consisting of matched KeyPoints found on the image and matched
keypoints found on the template. keypoints are sorted according to lowest distance.
**EXAMPLE**
>>> template = Image("template.png")
>>> img = camera.getImage()
        >>> fs = img.matchSIFTKeyPoints(template)
**SEE ALSO**
:py:meth:`_getRawKeypoints`
:py:meth:`_getFLANNMatches`
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.4.3 required")
return None
if not hasattr(cv2, "FeatureDetector_create"):
warnings.warn("OpenCV >= 2.4.3 required")
return None
if template == None:
return None
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("SIFT")
img = self.getNumpyCv2()
template_img = template.getNumpyCv2()
skp = detector.detect(img)
skp, sd = descriptor.compute(img, skp)
tkp = detector.detect(template_img)
tkp, td = descriptor.compute(template_img, tkp)
idx, dist = self._getFLANNMatches(sd, td)
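        # Note: 2500.0 appears to be an empirical scaling constant so that the resulting
        # distances are comparable to the 100-500 "quality" threshold range documented
        # above; it is not derived from the SIFT descriptor itself.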
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
sfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
sfs.append(KeyPoint(template, skp[i], sd, "SIFT"))
else:
break #since sorted
idx, dist = self._getFLANNMatches(td, sd)
dist = dist[:,0]/2500.0
dist = dist.reshape(-1,).tolist()
idx = idx.reshape(-1).tolist()
indices = range(len(dist))
indices.sort(key=lambda i: dist[i])
dist = [dist[i] for i in indices]
idx = [idx[i] for i in indices]
tfs = []
for i, dis in itertools.izip(idx, dist):
if dis < quality:
tfs.append(KeyPoint(template, tkp[i], td, "SIFT"))
else:
break
return sfs, tfs
def drawSIFTKeyPointMatch(self, template, distance=200, num=-1, width=1):
"""
**SUMMARY**
        drawSIFTKeyPointMatch draws a side by side representation of two images, calculates
        keypoints for both images, determines the keypoint correspondences, and then draws
        the correspondences. This method is helpful for debugging keypoint calculations
        and also looks really cool :) . The parameters mirror the parameters used
        for findKeypointMatch to assist with debugging.
**PARAMETERS**
* *template* - A template image.
        * *distance* - This can be any value between about 100 and 500. A lower value should
          return fewer features, but of higher quality.
* *num* - Number of features you want to draw. Features are sorted according to the
dist from min to max.
* *width* - The width of the drawn line.
**RETURNS**
A side by side image of the template and source image with each feature correspondence
draw in a different color.
**EXAMPLE**
>>> img = cam.getImage()
>>> template = Image("myTemplate.png")
        >>> result = img.drawSIFTKeyPointMatch(template, 300.00)
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
if template == None:
return
resultImg = template.sideBySide(self,scale=False)
hdif = (self.height-template.height)/2
sfs, tfs = self.matchSIFTKeyPoints(template, distance)
maxlen = min(len(sfs), len(tfs))
if num < 0 or num > maxlen:
num = maxlen
for i in range(num):
skp = sfs[i]
tkp = tfs[i]
pt_a = (int(tkp.y), int(tkp.x)+hdif)
pt_b = (int(skp.y)+template.width, int(skp.x))
resultImg.drawLine(pt_a, pt_b, color=Color.getRandom(),thickness=width)
return resultImg
def stegaEncode(self,message):
"""
**SUMMARY**
        A simple steganography tool for hiding messages in images.
**PARAMETERS**
* *message* -A message string that you would like to encode.
**RETURNS**
Your message encoded in the returning image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img2 = img.stegaEncode("HELLO WORLD!")
>>>> img2.save("TopSecretImg.png")
>>>> img3 = Image("TopSecretImg.png")
>>>> img3.stegaDecode()
**NOTES**
More here:
http://en.wikipedia.org/wiki/Steganography
You will need to install stepic:
http://domnit.org/stepic/doc/pydoc/stepic.html
You may need to monkey with jpeg compression
as it seems to degrade the encoded message.
        PNG seems to work quite well.
"""
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = pil.frombuffer("RGB",self.size(),self.toString())
stepic.encode_inplace(pilImg,message)
retVal = Image(pilImg)
return retVal.flipVertical()
def stegaDecode(self):
"""
**SUMMARY**
        A simple steganography tool for hiding and finding
messages in images.
**RETURNS**
Your message decoded in the image.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> img2 = img.stegaEncode("HELLO WORLD!")
>>>> img2.save("TopSecretImg.png")
>>>> img3 = Image("TopSecretImg.png")
>>>> img3.stegaDecode()
**NOTES**
More here:
http://en.wikipedia.org/wiki/Steganography
You will need to install stepic:
http://domnit.org/stepic/doc/pydoc/stepic.html
You may need to monkey with jpeg compression
as it seems to degrade the encoded message.
        PNG seems to work quite well.
"""
try:
import stepic
except ImportError:
logger.warning("stepic library required")
return None
warnings.simplefilter("ignore")
pilImg = pil.frombuffer("RGB",self.size(),self.toString())
result = stepic.decode(pilImg)
return result
def findFeatures(self, method="szeliski", threshold=1000):
"""
**SUMMARY**
        Find Szeliski or Harris features in the image.
Harris features correspond to Harris corner detection in the image.
Read more:
Harris Features: http://en.wikipedia.org/wiki/Corner_detection
szeliski Features: http://research.microsoft.com/en-us/um/people/szeliski/publications.htm
**PARAMETERS**
* *method* - Features type
* *threshold* - threshold val
**RETURNS**
        A list of Feature objects corresponding to the feature points.
**EXAMPLE**
>>> img = Image("corner_sample.png")
>>> fpoints = img.findFeatures("harris", 2000)
>>> for f in fpoints:
... f.draw()
>>> img.show()
**SEE ALSO**
:py:meth:`drawKeypointMatches`
:py:meth:`findKeypoints`
:py:meth:`findKeypointMatch`
"""
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
img = self.getGrayNumpyCv2()
blur = cv2.GaussianBlur(img, (3, 3), 0)
Ix = cv2.Sobel(blur, cv2.CV_32F, 1, 0)
Iy = cv2.Sobel(blur, cv2.CV_32F, 0, 1)
Ix_Ix = np.multiply(Ix, Ix)
Iy_Iy = np.multiply(Iy, Iy)
Ix_Iy = np.multiply(Ix, Iy)
Ix_Ix_blur = cv2.GaussianBlur(Ix_Ix, (5, 5), 0)
Iy_Iy_blur = cv2.GaussianBlur(Iy_Iy, (5, 5), 0)
Ix_Iy_blur = cv2.GaussianBlur(Ix_Iy, (5, 5), 0)
harris_thresh = threshold*5000
alpha = 0.06
detA = Ix_Ix_blur * Iy_Iy_blur - Ix_Iy_blur**2
traceA = Ix_Ix_blur + Iy_Iy_blur
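        # detA and traceA are the determinant and trace of the smoothed second-moment
        # (structure) tensor [[Ix*Ix, Ix*Iy], [Ix*Iy, Iy*Iy]]; the Harris response is
        # det - alpha*trace^2, and the Szeliski variant uses the harmonic mean det/trace.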
feature_list = []
if method == "szeliski":
harmonic_mean = detA / traceA
for j, i in np.argwhere(harmonic_mean > threshold):
feature_list.append(Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
elif method == "harris":
harris_function = detA - (alpha*traceA*traceA)
for j,i in np.argwhere(harris_function > harris_thresh):
feature_list.append(Feature(self, i, j, ((i, j), (i, j), (i, j), (i, j))))
else:
logger.warning("Invalid method.")
return None
return feature_list
def watershed(self, mask=None, erode=2,dilate=2, useMyMask=False):
"""
**SUMMARY**
Implements the Watershed algorithm on the input image.
Read more:
Watershed: "http://en.wikipedia.org/wiki/Watershed_(image_processing)"
**PARAMETERS**
* *mask* - an optional binary mask. If none is provided we do a binarize and invert.
* *erode* - the number of times to erode the mask to find the foreground.
* *dilate* - the number of times to dilate the mask to find possible background.
* *useMyMask* - if this is true we do not modify the mask.
**RETURNS**
The Watershed image
**EXAMPLE**
>>> img = Image("/sampleimages/wshed.jpg")
>>> img1 = img.watershed()
>>> img1.show()
# here is an example of how to create your own mask
>>> img = Image('lenna')
>>> myMask = Image((img.width,img.height))
>>> myMask = myMask.floodFill((0,0),color=Color.WATERSHED_BG)
>>> mask = img.threshold(128)
>>> myMask = (myMask-mask.dilate(2)+mask.erode(2))
>>> result = img.watershed(mask=myMask,useMyMask=True)
**SEE ALSO**
Color.WATERSHED_FG - The watershed foreground color
Color.WATERSHED_BG - The watershed background color
Color.WATERSHED_UNSURE - The watershed not sure if fg or bg color.
TODO: Allow the user to pass in a function that defines the watershed mask.
"""
try:
import cv2
except ImportError:
logger.warning("OpenCV >= 2.3.0 required")
return None
output = self.getEmpty(3)
if mask is None:
mask = self.binarize().invert()
newmask = None
if( not useMyMask ):
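            # Build a marker image for cv2.watershed: start with everything marked as
            # background, darken the dilated mask region (treated as "unsure"), and
            # brighten the eroded mask region so it is treated as sure foreground.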
newmask = Image((self.width,self.height))
newmask = newmask.floodFill((0,0),color=Color.WATERSHED_BG)
newmask = (newmask-mask.dilate(dilate)+mask.erode(erode))
else:
newmask = mask
m = np.int32(newmask.getGrayNumpyCv2())
cv2.watershed(self.getNumpyCv2(),m)
m = cv2.convertScaleAbs(m)
ret,thresh = cv2.threshold(m,0,255,cv2.cv.CV_THRESH_OTSU)
retVal = Image(thresh,cv2image=True)
return retVal
def findBlobsFromWatershed(self,mask=None,erode=2,dilate=2,useMyMask=False,invert=False,minsize=20,maxsize=None):
"""
**SUMMARY**
Implements the watershed algorithm on the input image with an optional mask and t
hen uses the mask to find blobs.
Read more:
Watershed: "http://en.wikipedia.org/wiki/Watershed_(image_processing)"
**PARAMETERS**
* *mask* - an optional binary mask. If none is provided we do a binarize and invert.
* *erode* - the number of times to erode the mask to find the foreground.
* *dilate* - the number of times to dilate the mask to find possible background.
* *useMyMask* - if this is true we do not modify the mask.
* *invert* - invert the resulting mask before finding blobs.
* *minsize* - minimum blob size in pixels.
* *maxsize* - the maximum blob size in pixels.
**RETURNS**
A feature set of blob features.
**EXAMPLE**
>>> img = Image("/sampleimages/wshed.jpg")
>>> mask = img.threshold(100).dilate(3)
>>> blobs = img.findBlobsFromWatershed(mask)
>>> blobs.show()
**SEE ALSO**
Color.WATERSHED_FG - The watershed foreground color
Color.WATERSHED_BG - The watershed background color
Color.WATERSHED_UNSURE - The watershed not sure if fg or bg color.
"""
newmask = self.watershed(mask,erode,dilate,useMyMask)
if( invert ):
            newmask = newmask.invert()
return self.findBlobsFromMask(newmask,minsize=minsize,maxsize=maxsize)
def maxValue(self,locations=False):
"""
**SUMMARY**
Returns the brightest/maximum pixel value in the
grayscale image. This method can also return the
locations of pixels with this value.
**PARAMETERS**
* *locations* - If true return the location of pixels
that have this value.
**RETURNS**
The maximum value and optionally the list of points as
a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lenna")
>>> max = img.maxValue()
>>> min, pts = img.minValue(locations=True)
>>> img2 = img.stretch(min,max)
"""
if(locations):
val = np.max(self.getGrayNumpy())
x,y = np.where(self.getGrayNumpy()==val)
locs = zip(x.tolist(),y.tolist())
return int(val),locs
else:
val = np.max(self.getGrayNumpy())
return int(val)
def minValue(self,locations=False):
"""
**SUMMARY**
Returns the darkest/minimum pixel value in the
grayscale image. This method can also return the
locations of pixels with this value.
**PARAMETERS**
* *locations* - If true return the location of pixels
that have this value.
**RETURNS**
The minimum value and optionally the list of points as
a list of (x,y) tuples.
**EXAMPLE**
>>> img = Image("lenna")
>>> max = img.maxValue()
>>> min, pts = img.minValue(locations=True)
>>> img2 = img.stretch(min,max)
"""
if(locations):
val = np.min(self.getGrayNumpy())
x,y = np.where(self.getGrayNumpy()==val)
locs = zip(x.tolist(),y.tolist())
return int(val),locs
else:
val = np.min(self.getGrayNumpy())
return int(val)
def findKeypointClusters(self, num_of_clusters = 5, order='dsc', flavor='surf'):
'''
This function is meant to try and find interesting areas of an
image. It does this by finding keypoint clusters in an image.
        It uses keypoint detection (SURF by default, or another flavor such as ORB) to locate points of interest
and then uses kmeans clustering to get the X,Y coordinates of
those clusters of keypoints. You provide the expected number
of clusters and you will get back a list of the X,Y coordinates
and rank order of the number of Keypoints around those clusters
**PARAMETERS**
* num_of_clusters - The number of clusters you are looking for (default: 5)
* order - The rank order you would like the points returned in, dsc or asc, (default: dsc)
* flavor - The keypoint type, or 'corner' for just corners
**EXAMPLE**
>>> img = Image('simplecv')
>>> clusters = img.findKeypointClusters()
>>> clusters.draw()
>>> img.show()
**RETURNS**
FeatureSet
'''
if flavor.lower() == 'corner':
keypoints = self.findCorners() #fallback to corners
else:
keypoints = self.findKeypoints(flavor=flavor.upper()) #find the keypoints
        if keypoints is None or len(keypoints) <= 0:
return None
xypoints = np.array([(f.x,f.y) for f in keypoints])
xycentroids, xylabels = scv.kmeans2(xypoints, num_of_clusters) # find the clusters of keypoints
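        # kmeans2 returns the cluster centroids and a label per point; `scv` is assumed
        # to be scipy.cluster.vq imported at the top of this module (not shown here).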
xycounts = np.array([])
        for i in range(num_of_clusters ): #count the frequency of occurrences for sorting
xycounts = np.append(xycounts, len(np.where(xylabels == i)[-1]))
        merged = np.msort(np.hstack((np.vstack(xycounts), xycentroids))) #sort based on occurrence
        clusters = [c[1:] for c in merged] # strip out just the x,y values (ascending)
if order.lower() == 'dsc':
clusters = clusters[::-1] #reverse if descending
fs = FeatureSet()
for x,y in clusters: #map the values to a feature set
f = Corner(self, x, y)
fs.append(f)
return fs
def getFREAKDescriptor(self, flavor="SURF"):
"""
**SUMMARY**
Compute FREAK Descriptor of given keypoints.
FREAK - Fast Retina Keypoints.
Read more: http://www.ivpe.com/freak.htm
Keypoints can be extracted using following detectors.
- SURF
- SIFT
- BRISK
- ORB
- STAR
- MSER
- FAST
- Dense
**PARAMETERS**
* *flavor* - Detector (see above list of detectors) - string
**RETURNS**
* FeatureSet* - A feature set of KeyPoint Features.
* Descriptor* - FREAK Descriptor
**EXAMPLE**
>>> img = Image("lenna")
>>> fs, des = img.getFREAKDescriptor("ORB")
"""
        try:
            import cv2
        except ImportError:
            warnings.warn("OpenCV version >= 2.4.2 required")
            return None
        if cv2.__version__.startswith('$Rev:'):
            warnings.warn("OpenCV version >= 2.4.2 required")
            return None
        if int(cv2.__version__.replace('.','0'))<20402:
            warnings.warn("OpenCV version >= 2.4.2 required")
            return None
flavors = ["SIFT", "SURF", "BRISK", "ORB", "STAR", "MSER", "FAST", "Dense"]
if flavor not in flavors:
warnings.warn("Unkown Keypoints detector. Returning None.")
return None
detector = cv2.FeatureDetector_create(flavor)
extractor = cv2.DescriptorExtractor_create("FREAK")
self._mKeyPoints = detector.detect(self.getGrayNumpyCv2())
self._mKeyPoints, self._mKPDescriptors = extractor.compute(self.getGrayNumpyCv2(),
self._mKeyPoints)
fs = FeatureSet()
for i in range(len(self._mKeyPoints)):
fs.append(KeyPoint(self, self._mKeyPoints[i], self._mKPDescriptors[i], flavor))
return fs, self._mKPDescriptors
def getGrayHistogramCounts(self, bins = 255, limit=-1):
'''
This function returns a list of tuples of greyscale pixel counts
        by frequency. This would be useful in determining the dominant
pixels (peaks) of the greyscale image.
**PARAMETERS**
        * *bins* - The number of bins for the histogram, defaults to 255 (greyscale)
* *limit* - The number of counts to return, default is all
**RETURNS**
* List * - A list of tuples of (frequency, value)
**EXAMPLE**
>>> img = Image("lenna")
>>> counts = img.getGrayHistogramCounts()
        >>> counts[0] #the most dominant pixel color tuple of frequency and value
        >>> counts[1][1] #the second most dominant pixel color value
'''
hist = self.histogram(bins)
vals = [(e,h) for h,e in enumerate(hist)]
vals.sort()
vals.reverse()
if limit == -1:
limit = bins
return vals[:limit]
def grayPeaks(self, bins = 255, delta = 0, lookahead = 15):
"""
**SUMMARY**
Takes the histogram of a grayscale image, and returns the peak
grayscale intensity values.
The bins parameter can be used to lump grays together, by default it is
set to 255
Returns a list of tuples, each tuple contains the grayscale intensity,
and the fraction of the image that has it.
**PARAMETERS**
* *bins* - the integer number of bins, between 1 and 255.
        * *delta* - the minimum difference between a peak and the following points,
                    before a peak may be considered a peak. Useful to hinder the
                    algorithm from picking up false peaks towards the end of
                    the signal.
        * *lookahead* - the distance to look ahead from a peak to determine if it is
                        an actual peak, should be an integer greater than 0.
**RETURNS**
A list of (grays,fraction) tuples.
**NOTE**
Implemented using the techniques used in huetab()
"""
        # np.histogram expects bin edges, and there is one more edge than there
        # are bars (edges = bars + 1). Since range() excludes its endpoint,
        # bins+2 is passed so that intensities 0..bins each get their own bar.
y_axis, x_axis = np.histogram(self.getGrayNumpy(), bins = range(bins+2))
x_axis = x_axis[0:bins+1]
maxtab = []
mintab = []
length = len(y_axis)
if x_axis is None:
x_axis = range(length)
#perform some checks
        if length != len(x_axis):
            raise ValueError("Input vectors y_axis and x_axis must have same length")
        if lookahead < 1:
            raise ValueError("Lookahead must be above '1' in value")
        if not (np.isscalar(delta) and delta >= 0):
            raise ValueError("delta must be a positive number")
#needs to be a numpy array
y_axis = np.asarray(y_axis)
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead], y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
maxtab.append((mxpos, mx))
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
mintab.append((mnpos, mn))
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
retVal = []
for intensity, pixelcount in maxtab:
retVal.append((intensity, pixelcount / float(self.width * self.height)))
return retVal
def tvDenoising(self, gray=False, weight=50, eps=0.0002, max_iter=200, resize=1):
"""
**SUMMARY**
Performs Total Variation Denoising, this filter tries to minimize the
total-variation of the image.
see : http://en.wikipedia.org/wiki/Total_variation_denoising
**Parameters**
* *gray* - Boolean value which identifies the colorspace of
the input image. If set to True, filter uses gray scale values,
otherwise colorspace is used.
* *weight* - Denoising weight, it controls the extent of denoising.
* *eps* - Stopping criteria for the algorithm. If the relative difference
of the cost function becomes less than this value, the algorithm stops.
* *max_iter* - Determines the maximum number of iterations the algorithm
goes through for optimizing.
* *resize* - Parameter to scale up/down the image. If set to
1 filter is applied on the original image. This parameter is
mostly to speed up the filter.
**NOTE**
This function requires Scikit-image library to be installed!
To install scikit-image library run::
sudo pip install -U scikit-image
Read More: http://scikit-image.org/
"""
try:
from skimage.filter import denoise_tv_chambolle
except ImportError:
logger.warn('Scikit-image Library not installed!')
return None
img = self.copy()
if resize <= 0:
print 'Enter a valid resize value'
return None
if resize != 1:
img = img.resize(int(img.width*resize),int(img.height*resize))
if gray is True:
img = img.getGrayNumpy()
multichannel = False
elif gray is False:
img = img.getNumpy()
multichannel = True
else:
warnings.warn('gray value not valid')
return None
denoise_mat = denoise_tv_chambolle(img,weight,eps,max_iter,multichannel)
retVal = img * denoise_mat
retVal = Image(retVal)
if resize != 1:
            return retVal.resize(int(retVal.width/resize),int(retVal.height/resize))
else:
return retVal
def motionBlur(self,intensity=15, direction='NW'):
"""
**SUMMARY**
        Performs the motion blur of an Image. Applies a directional convolution
        kernel to simulate motion blur along the requested direction.
see : https://en.wikipedia.org/wiki/Motion_blur
**Parameters**
* *intensity* - The intensity of the motion blur effect. Basically defines
the size of the filter used in the process. It has to be an integer.
0 intensity implies no blurring.
* *direction* - The direction of the motion. It is a string taking values
left, right, up, down as well as N, S, E, W for north, south, east, west
and NW, NE, SW, SE for northwest and so on.
default is NW
**RETURNS**
An image with the specified motion blur filter applied.
**EXAMPLE**
>>> i = Image ('lenna')
>>> mb = i.motionBlur()
>>> mb.show()
"""
mid = int(intensity/2)
tmp = np.identity(intensity)
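        # np.identity gives a diagonal line of ones; once part of it is zeroed out and
        # it is normalized by `div`, it acts as a diagonal (NW/NE/SW/SE) motion kernel.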
if intensity == 0:
warnings.warn("0 intensity means no blurring")
return self
        elif intensity % 2 == 0:
div=mid
for i in range(mid, intensity-1):
tmp[i][i] = 0
else:
div=mid+1
for i in range(mid+1, intensity-1):
tmp[i][i]=0
if direction == 'right' or direction.upper() == 'E':
kernel = np.concatenate((np.zeros((1,mid)),np.ones((1,mid+1))),axis=1)
elif direction == 'left' or direction.upper() == 'W':
kernel = np.concatenate((np.ones((1,mid+1)),np.zeros((1,mid))),axis=1)
elif direction == 'up' or direction.upper() == 'N':
kernel = np.concatenate((np.ones((1+mid,1)),np.zeros((mid,1))),axis=0)
elif direction == 'down' or direction.upper() == 'S':
kernel = np.concatenate((np.zeros((mid,1)),np.ones((mid+1,1))),axis=0)
elif direction.upper() == 'NW':
kernel = tmp
elif direction.upper() == 'NE':
kernel = np.fliplr(tmp)
elif direction.upper() == 'SW':
kernel = np.flipud(tmp)
elif direction.upper() == 'SE':
kernel = np.flipud(np.fliplr(tmp))
else:
warnings.warn("Please enter a proper direction")
return None
retval=self.convolve(kernel=kernel/div)
return retval
def recognizeFace(self, recognizer=None):
"""
**SUMMARY**
Find faces in the image using FaceRecognizer and predict their class.
**PARAMETERS**
* *recognizer* - Trained FaceRecognizer object
**EXAMPLES**
>>> cam = Camera()
>>> img = cam.getImage()
>>> recognizer = FaceRecognizer()
>>> recognizer.load("training.xml")
>>> print img.recognizeFace(recognizer)
"""
try:
import cv2
if not hasattr(cv2, "createFisherFaceRecognizer"):
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
except ImportError:
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
if not isinstance(recognizer, FaceRecognizer):
warnings.warn("SimpleCV.Features.FaceRecognizer object required.")
return None
w, h = recognizer.imageSize
label = recognizer.predict(self.resize(w, h))
return label
def findAndRecognizeFaces(self, recognizer, cascade=None):
"""
**SUMMARY**
Predict the class of the face in the image using FaceRecognizer.
**PARAMETERS**
* *recognizer* - Trained FaceRecognizer object
* *cascade* -haarcascade which would identify the face
in the image.
**EXAMPLES**
>>> cam = Camera()
>>> img = cam.getImage()
>>> recognizer = FaceRecognizer()
>>> recognizer.load("training.xml")
>>> feat = img.findAndRecognizeFaces(recognizer, "face.xml")
>>> for feature, label, confidence in feat:
... i = feature.crop()
... i.drawText(str(label))
... i.show()
"""
try:
import cv2
if not hasattr(cv2, "createFisherFaceRecognizer"):
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
except ImportError:
warnings.warn("OpenCV >= 2.4.4 required to use this.")
return None
if not isinstance(recognizer, FaceRecognizer):
warnings.warn("SimpleCV.Features.FaceRecognizer object required.")
return None
if not cascade:
cascade = "/".join([LAUNCH_PATH,"/Features/HaarCascades/face.xml"])
faces = self.findHaarFeatures(cascade)
if not faces:
warnings.warn("Faces not found in the image.")
return None
retVal = []
for face in faces:
label, confidence = face.crop().recognizeFace(recognizer)
retVal.append([face, label, confidence])
return retVal
def channelMixer(self, channel = 'r', weight = (100,100,100)):
"""
**SUMMARY**
        Mixes channels of an RGB image based on the weights provided. The output is given at the
        channel provided in the parameters. Basically alters the value of one channel of an RGB
        image based on the values of the other channels and itself. If the image is not RGB it is
        first converted to RGB and then the channels are mixed.
**PARAMETERS**
* *channel* - The output channel in which the values are to be replaced.
It can have either 'r' or 'g' or 'b'
* *weight* - The weight of each channel in calculation of the mixed channel.
It is a tuple having 3 values mentioning the percentage of the value of the
channels, from -200% to 200%
**RETURNS**
A SimpleCV RGB Image with the provided channel replaced with the mixed channel.
**EXAMPLE**
>>> img = Image("lenna")
>>> img2 = img.channelMixer()
        >>> img3 = img.channelMixer(channel = 'g', weight = (3,2,1))
**NOTE**
Read more at http://docs.gimp.org/en/plug-in-colors-channel-mixer.html
"""
r, g, b = self.splitChannels()
        if max(weight) > 200 or min(weight) < -200:
            warnings.warn('Value of weights can be from -200 to 200%')
            return None
weight = map(float,weight)
channel = channel.lower()
if channel == 'r':
r = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
elif channel == 'g':
g = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
elif channel == 'b':
b = r*(weight[0]/100.0) + g*(weight[1]/100.0) + b*(weight[2]/100.0)
else:
warnings.warn('Please enter a valid channel(r/g/b)')
return None
retVal = self.mergeChannels(r = r, g = g, b = b)
return retVal
def prewitt(self):
"""
**SUMMARY**
Prewitt operator for edge detection
**PARAMETERS**
None
**RETURNS**
        Image with the Prewitt operator applied to it
**EXAMPLE**
>>> img = Image("lenna")
>>> p = img.prewitt()
>>> p.show()
**NOTES**
Read more at: http://en.wikipedia.org/wiki/Prewitt_operator
"""
img = self.copy()
grayimg = img.grayscale()
gx = [[1,1,1],[0,0,0],[-1,-1,-1]]
gy = [[-1,0,1],[-1,0,1],[-1,0,1]]
grayx = grayimg.convolve(gx)
grayy = grayimg.convolve(gy)
grayxnp = np.uint64(grayx.getGrayNumpy())
grayynp = np.uint64(grayy.getGrayNumpy())
retVal = Image(np.sqrt(grayxnp**2+grayynp**2))
return retVal
def edgeSnap(self,pointList,step = 1):
"""
**SUMMARY**
        Given a list of points, finds the edges closest to the line joining two
        successive points; the edges are returned as a FeatureSet of
        Lines.
Note : Image must be binary, it is assumed that prior conversion is done
**Parameters**
* *pointList* - List of points to be checked for nearby edges.
* *step* - Number of points to skip if no edge is found in vicinity.
Keep this small if you want to sharply follow a curve
**RETURNS**
* FeatureSet * - A FeatureSet of Lines
**EXAMPLE**
>>> image = Image("logo").edges()
>>> edgeLines = image.edgeSnap([(50,50),(230,200)])
>>> edgeLines.draw(color = Color.YELLOW,width = 3)
"""
imgArray = self.getGrayNumpy()
c1 = np.count_nonzero(imgArray )
c2 = np.count_nonzero(imgArray - 255)
#checking that all values are 0 and 255
if( c1 + c2 != imgArray.size):
raise ValueError,"Image must be binary"
if(len(pointList) < 2 ):
return None
finalList = [pointList[0]]
featureSet = FeatureSet()
last = pointList[0]
        for point in pointList[1:]:
finalList += self._edgeSnap2(last,point,step)
last = point
last = finalList[0]
for point in finalList:
featureSet.append(Line(self,(last,point)))
last = point
return featureSet
def _edgeSnap2(self,start,end,step):
"""
**SUMMARY**
        Given two points, returns a list of edge points closest to the line joining them.
        A point is a tuple of two numbers.
Note : Image must be binary
**Parameters**
* *start* - First Point
* *end* - Second Point
* *step* - Number of points to skip if no edge is found in vicinity
Keep this low to detect sharp curves
**RETURNS**
* List * - A list of tuples , each tuple contains (x,y) values
"""
edgeMap = np.copy(self.getGrayNumpy())
#Size of the box around a point which is checked for edges.
box = step*4
xmin = min(start[0],end[0])
xmax = max(start[0],end[0])
ymin = min(start[1],end[1])
ymax = max(start[1],end[1])
line = self.bresenham_line(start,end)
#List of Edge Points.
finalList = []
i = 0
#Closest any point has ever come to the end point
overallMinDist = None
while i < len(line) :
x,y = line[i]
            #Get the matrix of points from the region around the current point.
region = edgeMap[x-box:x+box,y-box:y+box]
#Condition at the boundary of the image
if(region.shape[0] == 0 or region.shape[1] == 0):
i += step
continue
#Index of all Edge points
indexList = np.argwhere(region>0)
if (indexList.size > 0):
#Center the coordinates around the point
indexList -= box
minDist = None
# Incase multiple edge points exist, choose the one closest
# to the end point
for ix,iy in indexList:
dist = math.hypot(x+ix-end[0],iy+y-end[1])
if(minDist ==None or dist < minDist ):
dx,dy = ix,iy
minDist = dist
                # The distance of the new point is compared with the least
                # distance computed so far; the point is rejected if it is
                # comparatively larger. This is done so that edge points don't
                # wrap around a curve instead of heading towards the end point
if(overallMinDist!= None and minDist > overallMinDist*1.1):
i+=step
continue
if( overallMinDist == None or minDist < overallMinDist ):
overallMinDist = minDist
# Reset the points in the box so that they are not detected
# during the next iteration.
edgeMap[x-box:x+box,y-box:y+box] = 0
# Keep all the points in the bounding box
                if( xmin <= x+dx <= xmax and ymin <= y+dy <= ymax):
#Add the point to list and redefine the line
line =[(x+dx,y+dy)] + self.bresenham_line((x+dx, y+dy), end)
finalList += [(x+dx,y+dy)]
i = 0
i += step
finalList += [end]
return finalList
def motionBlur(self,intensity=15, angle = 0):
"""
**SUMMARY**
Performs the motion blur of an Image given the intensity and angle
see : https://en.wikipedia.org/wiki/Motion_blur
**Parameters**
* *intensity* - The intensity of the motion blur effect. Governs the
size of the kernel used in convolution
* *angle* - Angle in degrees at which motion blur will occur. Positive
is Clockwise and negative is Anti-Clockwise. 0 blurs from left to
right
**RETURNS**
An image with the specified motion blur applied.
**EXAMPLE**
>>> img = Image ('lenna')
>>> blur = img.motionBlur(40,45)
>>> blur.show()
"""
intensity = int(intensity)
if(intensity <= 1):
logger.warning('power less than 1 will result in no change')
return self
kernel = np.zeros((intensity,intensity))
rad = math.radians(angle)
x1,y1 = intensity/2,intensity/2
x2 = int(x1-(intensity-1)/2*math.sin(rad))
y2 = int(y1 -(intensity-1)/2*math.cos(rad))
line = self.bresenham_line((x1,y1),(x2,y2))
x = [p[0] for p in line]
y = [p[1] for p in line]
kernel[x,y] = 1
kernel = kernel/len(line)
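        # The kernel is a rasterized line (via Bresenham) from the kernel centre at the
        # requested angle, normalized by its length so overall brightness is preserved.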
return self.convolve(kernel = kernel)
def getLightness(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale using the
Lightness method.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Lightness method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getLightness()
>>> out.show()
**NOTES**
Algorithm used: value = (MAX(R,G,B) + MIN(R,G,B))/2
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array((np.max(imgMat,2) + np.min(imgMat,2))/2,dtype=np.uint8)
else:
            logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
def getLuminosity(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale using the
Luminosity method.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Luminosity method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getLuminosity()
>>> out.show()
**NOTES**
Algorithm used: value = 0.21 R + 0.71 G + 0.07 B
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array(np.average(imgMat,2,(0.07,0.71,0.21)),dtype=np.uint8)
else:
            logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
def getAverage(self):
"""
**SUMMARY**
This method converts the given RGB image to grayscale by averaging out
the R,G,B values.
**Parameters**
None
**RETURNS**
A GrayScale image with values according to the Average method
**EXAMPLE**
>>> img = Image ('lenna')
>>> out = img.getAverage()
>>> out.show()
**NOTES**
Algorithm used: value = (R+G+B)/3
"""
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
imgMat = np.array(self.getNumpyCv2(),dtype=np.int)
retVal = np.array(imgMat.mean(2),dtype=np.uint8)
else:
            logger.warning('Input an RGB image')
return None
return Image(retVal,cv2image=True)
def smartRotate(self,bins=18,point = [-1,-1],auto = True,threshold=80,minLength=30,maxGap=10,t1=150,t2=200,fixed = True):
"""
**SUMMARY**
Attempts to rotate the image so that the most significant lines are
        approximately parallel to the horizontal or vertical image edges.
**Parameters**
* *bins* - The number of bins the lines will be grouped into.
* *point* - the point about which to rotate, refer :py:meth:`rotate`
* *auto* - If true point will be computed to the mean of centers of all
the lines in the selected bin. If auto is True, value of point is
ignored
* *threshold* - which determines the minimum "strength" of the line
refer :py:meth:`findLines` for details.
* *minLength* - how many pixels long the line must be to be returned,
refer :py:meth:`findLines` for details.
* *maxGap* - how much gap is allowed between line segments to consider
them the same line .refer to :py:meth:`findLines` for details.
* *t1* - thresholds used in the edge detection step,
refer to :py:meth:`_getEdgeMap` for details.
* *t2* - thresholds used in the edge detection step,
refer to :py:meth:`_getEdgeMap` for details.
* *fixed* - if fixed is true,keep the original image dimensions,
otherwise scale the image to fit the rotation , refer to
:py:meth:`rotate`
**RETURNS**
A rotated image
**EXAMPLE**
>>> i = Image ('image.jpg')
>>> i.smartRotate().show()
"""
lines = self.findLines(threshold, minLength, maxGap, t1,t2)
if(len(lines) == 0):
logger.warning("No lines found in the image")
return self
# Initialize empty bins
binn = [[] for i in range(bins)]
#Convert angle to bin number
conv = lambda x:int(x+90)/bins
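        # Maps an angle in degrees (roughly -90..90) to a histogram bin index using
        # integer division; lines with similar orientation end up in the same bin.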
#Adding lines to bins
[ binn[conv(line.angle())].append(line) for line in lines ]
#computing histogram, value of each column is total length of all lines
#in the bin
hist = [ sum([line.length() for line in lines]) for lines in binn]
#The maximum histogram
index = np.argmax(np.array(hist))
#Good ol weighted mean, for the selected bin
avg = sum([line.angle()*line.length() for line in binn[index]])/sum([line.length() for line in binn[index] ])
#Mean of centers of all lines in selected bin
if(auto ):
x = sum([line.end_points[0][0] + line.end_points[1][0] for line in binn[index]])/2/len(binn[index])
y = sum([line.end_points[0][1] + line.end_points[1][1] for line in binn[index]])/2/len(binn[index])
point = [x,y]
#Determine whether to rotate the lines to vertical or horizontal
if (-45 <= avg <= 45):
return self.rotate(avg,fixed = fixed,point = point)
elif (avg > 45):
return self.rotate(avg-90,fixed = fixed,point = point)
else:
return self.rotate(avg+90,fixed = fixed,point = point)
#Congratulations !! You did a smart thing
def normalize(self, newMin = 0, newMax = 255, minCut = 2, maxCut = 98):
"""
**SUMMARY**
        Performs image normalization and yields a linearly normalized gray image.
        Also known as contrast stretching.
see : http://en.wikipedia.org/wiki/Normalization_(image_processing)
**Parameters**
* *newMin* - The minimum of the new range over which the image is normalized
* *newMax* - The maximum of the new range over which the image is normalized
        * *minCut* - A number between 0 and 100. The threshold percentage for the
          current minimum value selection. This helps us to avoid the effect of outlying
          pixels with very low values.
        * *maxCut* - A number between 0 and 100. The threshold percentage for the
          current maximum value selection. This helps us to avoid the effect of outlying
          pixels with very high values.
**RETURNS**
A normalized grayscale image.
**EXAMPLE**
>>> img = Image ('lenna')
        >>> norm = img.normalize()
>>> norm.show()
"""
if newMin < 0 or newMax >255:
warnings.warn("newMin and newMax can vary from 0-255")
return None
if newMax < newMin:
warnings.warn("newMin should be less than newMax")
return None
if minCut > 100 or maxCut > 100:
warnings.warn("minCut and maxCut")
return None
#avoiding the effect of odd pixels
try:
hist = self.getGrayHistogramCounts()
freq, val = zip(*hist)
maxfreq = (freq[0]-freq[-1])* maxCut/100.0
minfreq = (freq[0]-freq[-1])* minCut/100.0
closestMatch = lambda a,l:min(l, key=lambda x:abs(x-a))
maxval = closestMatch(maxfreq, val)
minval = closestMatch(minfreq, val)
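            # Linear contrast stretch: map the [minval, maxval] intensity range onto
            # [newMin, newMax].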
retVal = (self.grayscale()-minval)*((newMax-newMin)/float(maxval-minval))+ newMin
        #catching zero division in case there are very few intensities present
#Normalizing based on absolute max and min intensities present
except ZeroDivisionError:
maxval = self.maxValue()
minval = self.minValue()
retVal = (self.grayscale()-minval)*((newMax-newMin)/float(maxval-minval))+ newMin
#catching the case where there is only one intensity throughout
except:
warnings.warn("All pixels of the image have only one intensity value")
return None
return retVal
def getNormalizedHueHistogram(self,roi=None):
"""
**SUMMARY**
This method generates a normalized hue histogram for the image
or the ROI within the image. The hue histogram is a 2D hue/saturation
numpy array histogram with a shape of 180x256. This histogram can
be used for histogram back projection.
**PARAMETERS**
* *roi* - Anything that can be cajoled into being an ROI feature
including a tuple of (x,y,w,h), a list of points, or another feature.
**RETURNS**
A normalized 180x256 numpy array that is the hue histogram.
**EXAMPLE**
>>> img = Image('lenna')
>>> roi = (0,0,100,100)
>>> hist = img.getNormalizedHueHistogram(roi)
**SEE ALSO**
ImageClass.backProjectHueHistogram()
ImageClass.findBlobsFromHueHistogram()
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.3 required to use this.")
return None
from SimpleCV.Features import ROI
if( roi ): # roi is anything that can be taken to be an roi
roi = ROI(roi,self)
hsv = roi.crop().toHSV().getNumpyCv2()
else:
hsv = self.toHSV().getNumpyCv2()
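        # 2D histogram over the hue (0-180) and saturation (0-256) channels, then
        # scaled to 0-255 so it can be used directly by calcBackProject.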
hist = cv2.calcHist([hsv],[0,1],None,[180,256],[0,180,0,256])
cv2.normalize(hist,hist,0,255,cv2.NORM_MINMAX)
return hist
def backProjectHueHistogram(self,model,smooth=True,fullColor=False,threshold=None):
"""
**SUMMARY**
This method performs hue histogram back projection on the image. This is a very
quick and easy way of matching objects based on color. Given a hue histogram
taken from another image or an roi within the image we attempt to find all
pixels that are similar to the colors inside the histogram. The result can
either be a grayscale image that shows the matches or a color image.
**PARAMETERS**
        * *model* - The histogram to use for back projection. This can either be
          a histogram, anything that can be converted into an ROI for the image (like
          an x,y,w,h tuple or a feature), or another image.
* *smooth* - A bool, True means apply a smoothing operation after doing the
back project to improve the results.
* *fullColor* - return the results as a color image where pixels included
          in the back projection are rendered as their source color.
* *threshold* - If this value is not None, we apply a threshold to the
result of back projection to yield a binary image. Valid values are from
1 to 255.
**RETURNS**
A SimpleCV Image rendered according to the parameters provided.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> hist = img.getNormalizedHueHistogram((0,0,50,50)) # generate a hist
>>>> a = img.backProjectHueHistogram(hist)
        >>>> b = img.backProjectHueHistogram((0,0,50,50)) # same result
>>>> c = img.backProjectHueHistogram(Image('lyle'))
**SEE ALSO**
ImageClass.getNormalizedHueHistogram()
ImageClass.findBlobsFromHueHistogram()
"""
try:
import cv2
except ImportError:
warnings.warn("OpenCV >= 2.3 required to use this.")
return None
if( model is None ):
warnings.warn('Backproject requires a model')
return None
# this is the easier test, try to cajole model into ROI
if( isinstance(model,Image) ):
model = model.getNormalizedHueHistogram()
if(not isinstance(model,np.ndarray) or model.shape != (180,256) ):
model = self.getNormalizedHueHistogram(model)
if( isinstance(model,np.ndarray) and model.shape == (180,256) ):
hsv = self.toHSV().getNumpyCv2()
dst = cv2.calcBackProject([hsv],[0,1],model,[0,180,0,256],1)
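            # calcBackProject replaces every pixel with the histogram value of its
            # (hue, saturation) pair, i.e. how likely that pixel is under the model.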
if smooth:
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
cv2.filter2D(dst,-1,disc,dst)
result = Image(dst,cv2image=True)
result = result.toBGR()
if( threshold ):
result = result.threshold(threshold)
if( fullColor ):
temp = Image((self.width,self.height))
result = temp.blit(self,alphaMask=result)
return result
else:
warnings.warn('Backproject model does not appear to be valid')
return None
def findBlobsFromHueHistogram(self,model,threshold=1,smooth=True,minsize=10,maxsize=None):
"""
**SUMMARY**
This method performs hue histogram back projection on the image and uses
the results to generate a FeatureSet of blob objects. This is a very
quick and easy way of matching objects based on color. Given a hue histogram
taken from another image or an roi within the image we attempt to find all
pixels that are similar to the colors inside the histogram.
**PARAMETERS**
        * *model* - The histogram to use for back projection. This can either be
          a histogram, anything that can be converted into an ROI for the image (like
          an x,y,w,h tuple or a feature), or another image.
* *smooth* - A bool, True means apply a smoothing operation after doing the
back project to improve the results.
* *threshold* - If this value is not None, we apply a threshold to the
result of back projection to yield a binary image. Valid values are from
1 to 255.
* *minsize* - the minimum blob size in pixels.
* *maxsize* - the maximum blob size in pixels.
**RETURNS**
A FeatureSet of blob objects or None if no blobs are found.
**EXAMPLE**
>>>> img = Image('lenna')
>>>> hist = img.getNormalizedHueHistogram((0,0,50,50)) # generate a hist
>>>> blobs = img.findBlobsFromHueHistogram(hist)
>>>> blobs.show()
**SEE ALSO**
ImageClass.getNormalizedHueHistogram()
ImageClass.backProjectHueHistogram()
"""
newMask = self.backProjectHueHistogram(model,smooth,fullColor=False,threshold=threshold)
return self.findBlobsFromMask(newMask,minsize=minsize,maxsize=maxsize)
def filter(self, flt, grayscale=False):
"""
**SUMMARY**
This function allows you to apply an arbitrary filter to the DFT of an image.
This filter takes in a gray scale image, whiter values are kept and black values
are rejected. In the DFT image, the lower frequency values are in the corners
of the image, while the higher frequency components are in the center. For example,
a low pass filter has white squares in the corners and is black everywhere else.
**PARAMETERS**
* *flt* - A DFT filter
        * *grayscale* - if this value is True we perform the operation on the DFT of the gray
          version of the image and the result is a gray image. If grayscale is False
          we perform the operation on each channel and then recombine them to create
          the result.
**RETURNS**
A SimpleCV image after applying the filter.
**EXAMPLE**
>>> filter = DFT.createGaussianFilter()
>>> myImage = Image("MyImage.png")
>>> result = myImage.filter(filter)
>>> result.show()
"""
filteredimage = flt.applyFilter(self, grayscale)
return filteredimage
from SimpleCV.Features import FeatureSet, Feature, Barcode, Corner, HaarFeature, Line, Chessboard, TemplateMatch, BlobMaker, Circle, KeyPoint, Motion, KeypointMatch, FaceRecognizer
from SimpleCV.Tracking import camshiftTracker, lkTracker, surfTracker, mfTracker, TrackSet
from SimpleCV.Stream import JpegStreamer
from SimpleCV.Font import *
from SimpleCV.DrawingLayer import *
from SimpleCV.DFT import DFT
| bsd-3-clause |
Akshay0724/scikit-learn | examples/applications/plot_out_of_core_classification.py | 51 | 13651 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <[email protected]>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
# --------------------------------
#
class ReutersParser(html_parser.HTMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, encoding='latin-1'):
html_parser.HTMLParser.__init__(self)
self._reset()
self.encoding = encoding
def handle_starttag(self, tag, attrs):
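        # Tags are dispatched dynamically: a <TITLE> tag calls self.start_title /
        # self.end_title if defined, and unknown tags fall through to a no-op lambda.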
method = 'start_' + tag
getattr(self, method, lambda x: None)(attrs)
def handle_endtag(self, tag):
method = 'end_' + tag
getattr(self, method, lambda: None)()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk.decode(self.encoding))
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename, 'rb')):
yield doc
###############################################################################
# Main
# ----
#
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
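# All of these estimators implement partial_fit, so they can be updated one
# mini-batch at a time without holding the whole dataset in memory, roughly:
#
#   clf = SGDClassifier()
#   for X_batch, y_batch in batches:
#       clf.partial_fit(X_batch, y_batch, classes=all_classes)
#
# The `classes` argument is only required on the first call, so the estimator
# knows the full label set up front.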
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
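# The hashing trick is stateless: HashingVectorizer needs no fit() pass over the
# corpus, so transform() can be applied directly to the held-out test documents.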
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
tick = time.time()
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
cls_stats[cls_name]['prediction_time'] = time.time() - tick
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
# ------------
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = ['b', 'g', 'r', 'c', 'm', 'y']
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
ran0101/namebench | nb_third_party/dns/rdtypes/ANY/CNAME.py | 248 | 1092 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class CNAME(dns.rdtypes.nsbase.NSBase):
"""CNAME record
Note: although CNAME is officially a singleton type, dnspython allows
non-singleton CNAME rdatasets because such sets have been commonly
used by BIND and other nameservers for load balancing."""
pass
| apache-2.0 |
Huyuwei/tvm | python/tvm/contrib/dlpack.py | 2 | 2036 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping functions to bridge frameworks with DLPack support to TVM"""
from .. import ndarray
def convert_func(tvm_func, tensor_type, to_dlpack_func):
"""Convert a tvm function into one that accepts a tensor from another
framework, provided the other framework supports DLPACK
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
tensor_type: Type
Type of the tensors of the target framework
to_dlpack_func: Function
Function to convert the source tensors to DLPACK
"""
assert callable(tvm_func)
def _wrapper(*args):
args = tuple(ndarray.from_dlpack(to_dlpack_func(arg))\
if isinstance(arg, tensor_type) else arg for arg in args)
return tvm_func(*args)
return _wrapper
def to_pytorch_func(tvm_func):
"""Convert a tvm function into one that accepts PyTorch tensors
Parameters
----------
tvm_func: Function
Built tvm function operating on arrays
Returns
-------
wrapped_func: Function
Wrapped tvm function that operates on PyTorch tensors
"""
import torch
import torch.utils.dlpack
return convert_func(tvm_func, torch.Tensor, torch.utils.dlpack.to_dlpack)
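# Usage sketch (added for illustration, not part of the original module). It
# assumes `built_fn` is a TVM packed function produced elsewhere (for example by
# building a schedule for an element-wise add over length-16 buffers); the name
# and shapes here are hypothetical.
#
#     import torch
#     add_torch = to_pytorch_func(built_fn)
#     a, b, c = torch.rand(16), torch.rand(16), torch.empty(16)
#     add_torch(a, b, c)  # PyTorch tensors are handed to TVM zero-copy via DLPack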
| apache-2.0 |
scr4t/rep | tests/test_grid.py | 4 | 3211 | from __future__ import division, print_function, absolute_import
from collections import OrderedDict
from sklearn import clone
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import roc_auc_score
from rep.metaml import GridOptimalSearchCV, SubgridParameterOptimizer, FoldingScorer, \
RegressionParameterOptimizer
from rep.test.test_estimators import generate_classification_data, check_grid, run_grid
from rep.estimators import SklearnClassifier
__author__ = 'Tatiana Likhomanenko'
def grid_tmva(score_function):
grid_param = OrderedDict({"MaxDepth": [4, 5], "NTrees": [10, 20]})
generator = SubgridParameterOptimizer(grid_param)
scorer = FoldingScorer(score_function)
from rep.estimators import TMVAClassifier
grid = GridOptimalSearchCV(TMVAClassifier(features=['column0', 'column1']), generator, scorer)
cl = check_grid(grid, False, False, False)
assert 1 <= len(cl.features) <= 3
params = cl.get_params()
for key in grid_param:
assert params[key] == grid.generator.best_params_[key]
def grid_sklearn(score_function):
grid_param = OrderedDict({"n_estimators": [10, 20],
"learning_rate": [0.1, 0.05],
'features': [['column0', 'column1'], ['column0', 'column1', 'column2']]})
generator = RegressionParameterOptimizer(grid_param)
scorer = FoldingScorer(score_function)
grid = GridOptimalSearchCV(SklearnClassifier(clf=AdaBoostClassifier()), generator, scorer)
cl = check_grid(grid, False, False, False)
assert 1 <= len(cl.features) <= 3
params = cl.get_params()
for key in grid_param:
if key in params:
assert params[key] == grid.generator.best_params_[key]
else:
assert params['clf__' + key] == grid.generator.best_params_[key]
def grid_custom(custom):
grid_param = OrderedDict({"n_estimators": [10, 20],
"learning_rate": [0.1, 0.05],
'features': [['column0', 'column1'], ['column0', 'column1', 'column2']]})
generator = SubgridParameterOptimizer(grid_param)
grid = GridOptimalSearchCV(SklearnClassifier(clf=AdaBoostClassifier(),
features=['column0', 'column1']), generator, custom)
cl = check_grid(grid, False, False, False)
assert 1 <= len(cl.features) <= 3
params = cl.get_params()
for key in grid_param:
if key in params:
assert params[key] == grid.generator.best_params_[key]
else:
assert params['clf__' + key] == grid.generator.best_params_[key]
def test_grid():
def generate_scorer(test, labels):
def custom(base_estimator, params, X, y, sample_weight=None):
cl = clone(base_estimator)
cl.set_params(**params)
if sample_weight is not None:
cl.fit(X, y, sample_weight)
else:
cl.fit(X, y)
return roc_auc_score(labels, cl.predict_proba(test)[:, 1])
return custom
X, y, _ = generate_classification_data()
grid_custom(generate_scorer(X, y))
run_grid(grid_sklearn)
run_grid(grid_tmva) | apache-2.0 |
fukatani/CW_gui | chainer_wing/data_fetch.py | 1 | 10396 | import csv
import glob
from importlib import machinery
import os
from chainer.datasets import tuple_dataset
from chainer.datasets.image_dataset import _read_image_as_array
import numpy
from chainer_wing.extension.image_dataset import augment_data
from chainer_wing.subwindows.train_config import TrainParamServer
from chainer_wing import util
class DataManager(object):
def __init__(self):
self.train_columns = 0
def get_data_from_file(self, file_name, is_supervised, shuffle=False):
if file_name.endswith('.csv'):
return self.csv_to_ndarray(file_name, is_supervised, shuffle)
elif file_name.endswith('.npz'):
data = numpy.load(file_name)
if shuffle:
numpy.random.shuffle(data)
if is_supervised:
return data['x'], data['y']
else:
return data['x'], None
else:
raise util.UnexpectedFileExtension()
def csv_to_ndarray(self, csv_file, is_supervised, shuffle):
exists_header = 0
with open(csv_file, 'r') as f:
reader = csv.reader(f)
for line in reader:
if isinstance(line[0], str):
exists_header = 1
break
array = numpy.loadtxt(csv_file, dtype=numpy.float32,
delimiter=',', skiprows=exists_header)
if shuffle:
numpy.random.shuffle(array)
if is_supervised:
return array[:, :-1], numpy.atleast_2d(array[:, -1]).T
return array, None
def pack_data(self, data, label):
return tuple_dataset.TupleDataset(data, label)
def get_data_train(self):
train_server = TrainParamServer()
if train_server['TrainData'].endswith('.py'):
module = machinery.SourceFileLoader('data_getter',
train_server['TrainData'])
try:
module = module.load_module()
train_data, test_data = module.main()
train_data, train_label = train_data._datasets
test_data, test_label = test_data._datasets
except Exception as e:
raise util.AbnormalDataCode(e.args)
elif train_server['UseSameData']:
data_file = train_server['TrainData']
data, label = self.get_data_from_file(data_file, True,
train_server['Shuffle'])
if train_server['Shuffle']:
numpy.random.shuffle(data)
split_idx = int(data.shape[0] * train_server['TestDataRatio'])
train_data = data[:split_idx]
train_label = label[:split_idx]
test_data = data[split_idx:]
test_label = label[split_idx:]
else:
train_file = train_server['TrainData']
train_data, train_label = self.get_data_from_file(train_file, True,
train_server['Shuffle'])
test_file = train_server['TestData']
test_data, test_label = self.get_data_from_file(test_file, True)
# minmax
if TrainParamServer().use_minmax():
test_data = self.minmax_scale(test_data)
train_data = self.minmax_scale(train_data)
test_data = self.pack_data(test_data, test_label)
train_data = self.pack_data(train_data, train_label)
return train_data, test_data
def get_data_pred(self, including_label):
train_server = TrainParamServer()
if train_server['PredInputData'].endswith('.py'):
module = machinery.SourceFileLoader('data_getter',
train_server['PredInputData'])
try:
module = module.load_module()
if including_label:
data, label = module.main()
else:
data, label = module.main(), None
except Exception as error:
raise util.AbnormalDataCode(error.args)
else:
data_file = train_server['PredInputData']
data, label = self.get_data_from_file(data_file, including_label)
if TrainParamServer().use_minmax():
data = self.minmax_scale(data)
return data, label
def minmax_scale(self, x, lower_limit=0., upper_limit=1.):
data_min = numpy.min(x, axis=0)
x = x - data_min + lower_limit
data_max = numpy.max(x, axis=0)
return x / data_max * upper_limit
class ImageDataManager(object):
def __init__(self):
self.label_to_int = {}
def get_label(self, label):
return label.split('/')[-2]
def get_all_images(self, dir_name):
if not os.path.isdir(dir_name):
raise Exception('Directory {} was not found.'.format(dir_name))
image_files = []
for ext in util.for_image_extensions():
image_files += glob.glob(dir_name + '/*/*.{}'.format(ext))
if not image_files:
raise Exception('No jpg file in {}'.format(dir_name))
labels = []
for image_file in image_files:
label = self.get_label(image_file)
labels.append(label)
return numpy.array(image_files), numpy.array(labels)
def make_image_list(self, image_files, labels, list_file_name):
assert len(image_files) == len(labels)
with open(list_file_name, 'w') as fw:
for image, label in zip(image_files, labels):
fw.write(image + ' ' + self.label_to_int[label] + '\n')
def make_label_conversion_file(self, labels,
label_convertion_file):
self.label_to_int = {}
for label in labels:
if label not in self.label_to_int:
self.label_to_int[label] = str(len(self.label_to_int))
with open(label_convertion_file, 'w') as fw:
for key, value in self.label_to_int.items():
fw.write(key + ' ' + value + '\n')
def get_data_train(self):
train_server = TrainParamServer()
train_images, train_labels = self.get_all_images(train_server['TrainData'])
if train_server['UseSameData']:
split_idx = int(len(train_images) * train_server['TestDataRatio'])
indices = numpy.arange(len(train_images))
if train_server['Shuffle']:
numpy.random.shuffle(indices)
train_idx = indices[:split_idx]
test_idx = indices[split_idx:]
test_images = train_images[test_idx]
test_labels = train_labels[test_idx]
train_images = train_images[train_idx]
train_labels = train_labels[train_idx]
else:
test_images, test_labels = self.get_all_images(train_server['TrainData'])
all_labels = numpy.hstack((train_labels, test_labels))
all_labels = sorted(list(set(all_labels)))
label_conversion_file = os.path.join(train_server.get_work_dir(),
'label_conversion.txt')
self.make_label_conversion_file(all_labels, label_conversion_file)
train_label_file = os.path.join(train_server.get_work_dir(),
'train_label.txt')
self.make_image_list(train_images, train_labels, train_label_file)
test_label_file = os.path.join(train_server.get_work_dir(),
'test_label.txt')
self.make_image_list(test_images, test_labels, test_label_file)
self.compute_mean(train_images)
def compute_mean(self, images):
print('compute mean image')
sum_image = 0
N = len(images)
resize_width = TrainParamServer()['ResizeWidth']
resize_height = TrainParamServer()['ResizeHeight']
crop_edit = TrainParamServer()['Crop']
crop_width = TrainParamServer()['CropWidth']
crop_height = TrainParamServer()['CropHeight']
use_random_x_flip = TrainParamServer()['UseRandomXFlip']
use_random_y_flip = TrainParamServer()['UseRandomYFlip']
use_random_rotate = TrainParamServer()['UseRandomRotation']
pca_lighting = TrainParamServer()['PCAlighting']
for i, image in enumerate(images):
image = _read_image_as_array(image, numpy.float32)
image = image.transpose(2, 0, 1).astype(numpy.float32)
image = augment_data(image, resize_width, resize_height,
use_random_x_flip, use_random_y_flip,
use_random_rotate, pca_lighting, crop_edit,
crop_width, crop_height)
sum_image += image
mean_file = os.path.join(TrainParamServer().get_work_dir(),
'mean.npy')
mean = sum_image / N
numpy.save(mean_file, mean)
def get_data_pred(self):
train_server = TrainParamServer()
if os.path.isdir(train_server['PredInputData']):
dir_name = train_server['PredInputData']
image_files = []
for ext in util.for_image_extensions():
image_files += glob.glob(dir_name + '/*.{}'.format(ext))
image_files += glob.glob(dir_name + '/*/*.{}'.format(ext))
if not image_files:
raise Exception('No jpg file in {}'.format(dir_name))
pred_label_file = os.path.join(train_server.get_work_dir(),
'pred_label.txt')
elif os.path.isfile(train_server['PredInputData']):
image_files = (train_server['PredInputData'],)
pred_label_file = os.path.join(train_server.get_work_dir(),
'pred_label.txt')
else:
raise FileNotFoundError(train_server['PredInputData'] +
' is not found.')
with open(pred_label_file, 'w') as fw:
for image in image_files:
fw.write(image + '\n')
return pred_label_file
if __name__ == '__main__':
train_x, train_y = DataManager().get_data_from_file('sample_data.csv', True)
train_x, train_y = DataManager().get_data_from_file('sample_data.npz', True)
| bsd-3-clause |
woobe/h2o | py/testdir_single_jvm/test_GLM2_hastie_shuffle.py | 2 | 4276 | # Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if sum(X[i]**2) > 9.34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_hosts, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname,
hex_key=csvFilename + ".hex", schema='put', timeoutSecs=10)
y = 10
# Took n_folds out, because GLM doesn't include n_folds time and it's slow
# wanted to compare GLM time to my measured time
# hastie has two values, 1 and -1. need to use case for one of them
kwargs = {'response': y, 'alpha': 0, 'family': 'binomial'}
h2o.nodes[0].to_enum(src_key=parseResult['destination_key'], column_index=y+1)
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python measured)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
glm_model = glm['glm_model']
validation = glm_model['submodels'][0]['validation']
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'auc', validation, self.validation1)
else:
self.validation1 = copy.deepcopy(validation)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM2_hastie_shuffle(self):
h2o.beta_features = True
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard' + '/' + csvFilename
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
glm_doit(self, filename2xShuf, None, pathname2xShuf, timeoutSecs=45)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf,pathname2xShuf,pathname4x)
glm_doit(self,filename4x, None, pathname4x, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Akshay0724/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 330 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
Fireblend/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 330 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
solashirai/edx-platform | openedx/core/lib/block_structure/tests/test_cache.py | 3 | 1842 | """
Tests for block_structure/cache.py
"""
from nose.plugins.attrib import attr
from unittest import TestCase
from ..cache import BlockStructureCache
from .helpers import ChildrenMapTestMixin, MockCache, MockTransformer
@attr('shard_2')
class TestBlockStructureCache(ChildrenMapTestMixin, TestCase):
"""
Tests for BlockStructureFactory
"""
def setUp(self):
super(TestBlockStructureCache, self).setUp()
self.children_map = self.SIMPLE_CHILDREN_MAP
self.block_structure = self.create_block_structure(self.children_map)
self.cache = BlockStructureCache(MockCache())
def add_transformers(self):
"""
Add each registered transformer to the block structure.
Mimic collection by setting test transformer block data.
"""
for transformer in [MockTransformer]:
self.block_structure._add_transformer(transformer) # pylint: disable=protected-access
self.block_structure.set_transformer_block_field(
usage_key=0, transformer=transformer, key='test', value='{} val'.format(transformer.name())
)
def test_add(self):
self.add_transformers()
self.cache.add(self.block_structure)
cached_value = self.cache.get(self.block_structure.root_block_usage_key)
self.assertIsNotNone(cached_value)
self.assert_block_structure(cached_value, self.children_map)
def test_get_none(self):
self.assertIsNone(
self.cache.get(self.block_structure.root_block_usage_key)
)
def test_delete(self):
self.add_transformers()
self.cache.add(self.block_structure)
self.cache.delete(self.block_structure.root_block_usage_key)
self.assertIsNone(
self.cache.get(self.block_structure.root_block_usage_key)
)
| agpl-3.0 |
neurospin/pylearn-epac | doc/memory_benchmark/test_memmapping.py | 1 | 10862 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 25 15:56:37 2013
@author: [email protected]
"""
import numpy as np
from sklearn import datasets
import random
import time
#import datetime
import sys
import os
#import tempfile
import unittest
import dill as pickle
import getopt
from epac import StoreFs
from epac.tests.utils import isequal, compare_two_node
def create_mmat(nrows, ncols, default_values=None, dir=None,
writing_mode=True):
''' Create a random matrix with memory mapping (saved on the disk)
Create a matrix of the desired number of rows and columns, and fill it with
random numbers taken from the default_values list.
Parameters
----------
nrows: int
number of rows the matrix
ncols: int
number of columns of the matrix
default_values: list of integers
Choose the random integers from this list to fill the matrix
dir: directory path
Path of the directory where the matrix will be saved
If None, save in /tmp
writing_mode: boolean
If True, generate the matrix
Otherwise, test if there is an existing matrix. If there is, load the
previously generated matrix, if not, generate it
'''
# Define the name of the matrix, depending on its size
filename = 'tmp_rows_' + str(nrows) + '_cols_' + str(ncols)
if dir is None:
filepath = '/tmp/' + filename
else:
if not os.path.isdir(dir):
os.mkdir(dir)
filepath = os.path.join(dir, filename)
# Test if the file already exists
existing_file = os.path.isfile(filepath)
if writing_mode or not existing_file:
# If the user wants, or if the file doesn't exist already,
# generate the matrix and fill it row by row
mem_mat = np.memmap(filepath,
dtype='float32',
mode='w+',
shape=(nrows, ncols))
for i in xrange(nrows):
if not default_values:
mem_mat[i, :] = np.random.random(size=ncols)
elif type(default_values) is list:
insert_row = np.zeros(ncols)
for j in xrange(len(insert_row)):
pos = random.randint(0, len(default_values) - 1)
insert_row[j] = default_values[pos]
mem_mat[i, :] = insert_row
else:
# Load the matrix previously generated
mem_mat = np.memmap(filepath,
dtype='float32',
mode='r+',
shape=(nrows, ncols))
return mem_mat
def create_array(size, default_values=None, dir=None, writing_mode=True):
''' Create a random array with memory mapping (saved on the disk)
Create an array of the desired size, and fill it with
random numbers taken from the default_values list.
Parameters
----------
size: int
size of the array
default_values: list of integers
Choose the random integers from this list to fill the matrix
dir: directory path
Path of the directory where the matrix will be saved
If None, save in /tmp
writing_mode: boolean
If True, generate the matrix
Otherwise, load a previously generated matrix
'''
ret_array = create_mmat(size, 1, default_values=default_values, dir=dir,
writing_mode=writing_mode)
ret_array = ret_array[:, 0]
return ret_array
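# Usage sketch (added for illustration, not part of the original test module).
# Both helpers write their backing file to disk, so this is left as a comment;
# the directory below is hypothetical.
#
#     X = create_mmat(1000, 50, dir='/tmp/epac_bench')         # 1000x50 float32 memmap
#     y = create_array(1000, [0, 1], dir='/tmp/epac_bench')    # labels drawn from {0, 1}
#     X_again = create_mmat(1000, 50, dir='/tmp/epac_bench',
#                           writing_mode=False)                # reload the same file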
# @profile
class TestMemMapping(unittest.TestCase):
''' Test the capacity of the computer
Parameters
----------
n_samples: int
Number of rows of the X matrix
n_features: int
Number of columns of the X matrix
memmap: boolean
If True, use memory mapping to reduce memory cost
n_proc: int
Number of processes
is_swf: boolean
If True, run the processes on the cluster
If False, run on the local machine
dir: directory path
Path of the directory where you want to save the temporary files
If None, save in /tmp
'''
def __init__(self, n_samples, n_features, memmap,
n_proc, is_swf, directory, testname='test_memmapping'):
super(TestMemMapping, self).__init__(testname)
self.n_samples = n_samples
self.n_features = n_features
self.memmap = memmap
self.n_proc = n_proc
self.is_swf = is_swf
self.directory = directory
def test_memmapping(self):
## 1) Building dataset
## ============================================================
if self.memmap:
# If the proc is 1, always generate the matrix
# Otherwise, load it if it exists, or create it if it doesn't
writing_mode = (self.n_proc == 1)
X = create_mmat(self.n_samples, self.n_features,
dir=self.directory,
writing_mode=writing_mode)
y = create_array(self.n_samples, [0, 1], dir=self.directory,
writing_mode=writing_mode)
Xy = dict(X=X, y=y)
else:
X, y = datasets.make_classification(n_samples=self.n_samples,
n_features=self.n_features,
n_informative=2,
random_state=1)
Xy = dict(X=X, y=y)
## 2) Building workflow
## =======================================================
from sklearn.svm import SVC
from epac import CV, Methods
cv_svm_local = CV(Methods(*[SVC(kernel="linear"),
SVC(kernel="rbf")]), n_folds=3)
cv_svm = None
if self.is_swf:
# Running on the cluster
from epac import SomaWorkflowEngine
mmap_mode = None
if self.memmap:
mmap_mode = "r+"
swf_engine = SomaWorkflowEngine(cv_svm_local,
num_processes=self.n_proc,
resource_id="jl237561@gabriel",
login="jl237561",
# remove_finished_wf=False,
# remove_local_tree=False,
mmap_mode=mmap_mode,
queue="Global_long")
cv_svm = swf_engine.run(**Xy)
# Printing information about the jobs
time.sleep(2)
print ''
sum_memory = 0
max_time_cost = 0
for job_info in swf_engine.engine_info:
print "mem_cost=", job_info.mem_cost, \
", vmem_cost=", job_info.vmem_cost, \
", time_cost=", job_info.time_cost
sum_memory += job_info.mem_cost
if max_time_cost < job_info.time_cost:
max_time_cost = job_info.time_cost
print "sum_memory =", sum_memory
print "max_time_cost =", max_time_cost
else:
# Running on the local machine
from epac import LocalEngine
local_engine = LocalEngine(cv_svm_local, num_processes=self.n_proc)
cv_svm = local_engine.run(**Xy)
cv_svm_reduce = cv_svm.reduce()
print "\n -> Reducing results"
print cv_svm_reduce
# Creating the directory to save results, if it doesn't exist
dirname = 'tmp_save_tree/'
if self.directory is None:
directory = '/tmp'
else:
directory = self.directory
if not os.path.isdir(directory):
os.mkdir(directory)
dirpath = os.path.join(directory, dirname)
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
if self.n_proc == 1:
## 4.1) Saving results on the disk for one process
## ===================================================
store = StoreFs(dirpath=dirpath, clear=True)
cv_svm.save_tree(store=store)
with open(os.path.join(directory, "tmp_save_results"), 'w+') \
as filename:
print filename.name
pickle.dump(cv_svm_reduce, filename)
else:
## 4.2) Loading the results for one process
## ===================================================
try:
store = StoreFs(dirpath=dirpath, clear=False)
cv_svm_one_proc = store.load()
with open(os.path.join(directory, "tmp_save_results"), 'r+') \
as filename:
cv_svm_reduce_one_proc = pickle.load(filename)
## 5.2) Comparing results to the results for one process
## ===================================================
print "\nComparing %i proc with one proc" % self.n_proc
self.assertTrue(compare_two_node(cv_svm, cv_svm_one_proc))
self.assertTrue(isequal(cv_svm_reduce, cv_svm_reduce_one_proc))
except KeyError:
print "Warning: "
print "No previous tree detected, no possible "\
"comparison of results"
if __name__ == "__main__":
# Default values on the test
n_samples = 50
n_features = 500
memmap = True
n_proc = 1
is_swf = False
directory = '/volatile'
# Getting the arguments from the shell
optlist, args = getopt.gnu_getopt(sys.argv[1:], "", ["n_samples=",
"n_features=",
"memmap=",
"n_proc=",
"is_swf=",
"dir="])
# Changing the default values depending on the given arguments
for opt in optlist:
if opt[0] == '--n_samples':
n_samples = int(opt[1])
elif opt[0] == '--n_features':
n_features = int(opt[1])
elif opt[0] == '--memmap':
memmap = (opt[1] == 'True')
elif opt[0] == '--n_proc':
n_proc = int(opt[1])
elif opt[0] == '--is_swf':
is_swf = (opt[1] == 'True')
elif opt[0] == '--dir':
directory = opt[1]
# Running the test with the given arguments
suite = unittest.TestSuite()
suite.addTest(TestMemMapping(n_samples, n_features,
memmap, n_proc, is_swf,
directory))
unittest.TextTestRunner().run(suite)
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/datasets/tests/test_lfw.py | 228 | 7880 | """This test for the LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
ales-erjavec/orange | Orange/OrangeWidgets/Classify/OWSaveClassifier.py | 6 | 4271 | """<name>Save Classifier</name>
<description>Save orange classifiers to a file</description>
<icon>icons/SaveClassifier.svg</icon>
<contact>Ales Erjavec (ales.erjavec(@at@)fri.uni-lj.si)</contact>
<priority>3000</priority>
"""
from OWWidget import *
import OWGUI
import orange
import sys, os
class OWSaveClassifier(OWWidget):
settingsList = ["lastSaveFile", "filenameHistory"]
def __init__(self, parent=None, signalManager=None, name="Save Classifier"):
OWWidget.__init__(self, parent, signalManager, name,
wantMainArea=False, resizingEnabled=False)
self.inputs = [("Classifier", orange.Classifier, self.setClassifier)]
self.lastSaveFile = os.path.expanduser("~/orange_classifier.pck")
self.filenameHistory = []
self.selectedFileIndex = 0
self.loadSettings()
#####
# GUI
#####
box = OWGUI.widgetBox(self.controlArea, "File",
orientation="horizontal",
addSpace=True)
self.filesCombo = OWGUI.comboBox(box, self, "selectedFileIndex",
items=[os.path.basename(f) for f in self.filenameHistory],
tooltip="Select a recently saved file",
callback=self.onRecentSelection)
self.filesCombo.setMinimumWidth(200)
self.browseButton = OWGUI.button(box, self, "...",
tooltip="Browse local file system",
callback=self.browse)
self.browseButton.setIcon(self.style().standardIcon(QStyle.SP_DirOpenIcon))
self.browseButton.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Fixed)
box = OWGUI.widgetBox(self.controlArea, "Save")
self.saveButton = OWGUI.button(box, self, "Save current classifier",
callback=self.saveCurrentClassifier)
self.saveButton.setEnabled(False)
OWGUI.rubber(self.controlArea)
self.resize(200, 100)
self.classifier = None
def onRecentSelection(self):
filename = self.filenameHistory[self.selectedFileIndex]
self.filenameHistory.pop(self.selectedFileIndex)
self.filenameHistory.insert(0, filename)
self.filesCombo.removeItem(self.selectedFileIndex)
self.filesCombo.insertItem(0, os.path.basename(filename))
self.selectedFileIndex = 0
def browse(self):
filename = QFileDialog.getSaveFileName(self, "Save Classifier As ...",
self.lastSaveFile, "Pickle files (*.pickle *.pck);; All files (*.*)")
filename = unicode(filename)
if filename:
if filename in self.filenameHistory:
self.selectedFileIndex = self.filenameHistory.index(filename)
self.onRecentSelection()
return
self.lastSaveFile = filename
self.filenameHistory.insert(0, filename)
self.filesCombo.insertItem(0, os.path.basename(filename))
self.filesCombo.setCurrentIndex(0)
self.saveButton.setEnabled(self.classifier is not None and bool(self.filenameHistory))
def saveCurrentClassifier(self):
if self.classifier is not None:
filename = self.filenameHistory[self.selectedFileIndex]
import cPickle
self.error(0)
try:
cPickle.dump(self.classifier, open(filename, "wb"))
except Exception, ex:
self.error(0, "Could not save classifier! %s" % str(ex))
def setClassifier(self, classifier=None):
self.classifier = classifier
self.saveButton.setEnabled(classifier is not None and bool(self.filenameHistory))
if __name__ == "__main__":
app = QApplication([])
w = OWSaveClassifier()
import orngTree
data = orange.ExampleTable("../../doc/datasets/iris.tab")
w.setClassifier(orngTree.TreeLearner(data))
w.show()
app.exec_()
w.saveSettings()
| gpl-3.0 |
Akshay0724/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 272 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
N Y Y Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
thientu/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 380 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, in comparison to that
of the standard linear regression
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
MaestroGraph/sparse-hyper | sparse/tensors.py | 1 | 10861 | import torch
from torch import FloatTensor, LongTensor
from torch.autograd import Variable
import torch.nn.functional as F
from sparse.util import prod
import util, sys
from util import d
"""
Utility functions for manipulation tensors
"""
def flatten_indices_mat(indices, in_shape, out_shape):
"""
Turns an N-by-K matrix of N index tuples for a tensor T of rank K into an N-by-2 matrix M of index tuples for a
_matrix_, built by flattening the leading 'out_shape' coordinates into the vertical (row) dimension of M and the
remaining 'in_shape' coordinates into the horizontal (column) dimension of M.
:param indices: Long tensor of index tuples
:param in_shape: shape of the input dimensions
:param out_shape: shape of the output dimensions
:return: (1) A matrix of size N by 2, (2) the dimensions of M
"""
batchsize, n, rank = indices.size()
inrank = len(in_shape)
outrank = len(out_shape)
result = torch.cuda.LongTensor(batchsize, n, 2) if indices.is_cuda else LongTensor(batchsize, n, 2)
left = fi_matrix(indices[:, :, 0:outrank], out_shape) # i index of the weight matrix
right = fi_matrix(indices[:, :, outrank:rank], in_shape) # j index
result = torch.cat([left.unsqueeze(2), right.unsqueeze(2)], dim=2)
return result, LongTensor((prod(out_shape), prod(in_shape)))
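# Worked example (added for illustration): with out_shape=(2, 2) and in_shape=(3,),
# a tensor index tuple (a, b, c) is mapped to the matrix index (a * 2 + b, c), and
# the returned matrix size is (prod(out_shape), prod(in_shape)) = (4, 3).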
def fi_matrix(indices, shape):
batchsize, rows, rank = indices.size()
prod = torch.LongTensor(rank).fill_(1)
if indices.is_cuda:
prod = prod.cuda()
for i in range(rank):
prod[i] = 1
for j in range(i + 1, len(shape)):
prod[i] *= shape[j]
indices = indices * prod.unsqueeze(0).unsqueeze(0).expand_as(indices)
return indices.sum(dim=2)
def contract(indices, values, size, x, cuda=None):
"""
Performs a contraction (generalized matrix multiplication) of a sparse tensor with an input x.
The contraction is defined so that every element of the output is the sum of every element of the input multiplied
once by a unique element from the tensor (that is, like a fully connected neural network layer). See the paper for
details.
:param indices: (b, k, r)-tensor describing indices of b sparse tensors of rank r
:param values: (b, k)-tensor with the corresponding values
:param size:
:param x:
:return:
"""
# translate tensor indices to matrix indices
if cuda is None:
cuda = indices.is_cuda
b, k, r = indices.size()
# size is equal to out_size + x.size()
in_size = x.size()[1:]
out_size = size[:-len(in_size)]
assert len(out_size) + len(in_size) == r
# Flatten into a matrix multiplication
mindices, flat_size = flatten_indices_mat(indices, x.size()[1:], out_size)
x_flat = x.view(b, -1, 1)
# Prevent segfault
assert mindices.min() >= 0, 'negative index in flattened indices: {} \n {} \n Original indices {} \n {}'.format(mindices.size(), mindices, indices.size(), indices)
assert not util.contains_nan(values.data), 'NaN in values:\n {}'.format(values)
y_flat = batchmm(mindices, values, flat_size, x_flat, cuda)
return y_flat.view(b, *out_size) # reshape y into a tensor
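# Minimal usage sketch (added for illustration, not part of the original module):
# one sparse rank-2 tensor of shape (2, 3) with entries T[0, 0] = 1.0 and
# T[1, 2] = 2.0 is contracted with a single length-3 input vector on the CPU.
# The indices and values below are arbitrary toy data.
def _contract_example():
    ind = torch.tensor([[[0, 0], [1, 2]]], dtype=torch.long)  # (b=1, k=2, r=2)
    val = torch.tensor([[1.0, 2.0]])                          # (b=1, k=2)
    x = torch.tensor([[0.5, 1.0, 1.5]])                       # (b=1, 3)
    # expected output: [[1.0 * 0.5, 2.0 * 1.5]] == [[0.5, 3.0]], shape (1, 2)
    return contract(ind, val, (2, 3), x, cuda=False)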
def sparsemm(use_cuda):
"""
:param use_cuda:
:return:
"""
return SparseMMGPU.apply if use_cuda else SparseMMCPU.apply
class SparseMMCPU(torch.autograd.Function):
"""
Sparse matrix multiplication with gradients over the value-vector
Does not work with batch dim.
"""
@staticmethod
def forward(ctx, indices, values, size, xmatrix):
# print(type(size), size, list(size), intlist(size))
# print(indices.size(), values.size(), torch.Size(intlist(size)))
matrix = torch.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
return torch.mm(matrix, xmatrix)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.data
# -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
i_ixs = ctx.indices[0,:]
j_ixs = ctx.indices[1,:]
output_select = grad_output[i_ixs, :]
xmatrix_select = ctx.xmatrix[j_ixs, :]
grad_values = (output_select * xmatrix_select).sum(dim=1)
grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
return None, Variable(grad_values), None, Variable(grad_xmatrix)
class SparseMMGPU(torch.autograd.Function):
"""
Sparse matrix multiplication with gradients over the value-vector
Does not work with batch dim.
"""
@staticmethod
def forward(ctx, indices, values, size, xmatrix):
# print(type(size), size, list(size), intlist(size))
matrix = torch.cuda.sparse.FloatTensor(indices, values, torch.Size(intlist(size)))
ctx.indices, ctx.matrix, ctx.xmatrix = indices, matrix, xmatrix
return torch.mm(matrix, xmatrix)
@staticmethod
def backward(ctx, grad_output):
grad_output = grad_output.data
# -- this will break recursive autograd, but it's the only way to get grad over sparse matrices
i_ixs = ctx.indices[0,:]
j_ixs = ctx.indices[1,:]
output_select = grad_output[i_ixs]
xmatrix_select = ctx.xmatrix[j_ixs]
grad_values = (output_select * xmatrix_select).sum(dim=1)
grad_xmatrix = torch.mm(ctx.matrix.t(), grad_output)
return None, Variable(grad_values), None, Variable(grad_xmatrix)
def batchmm(indices, values, size, xmatrix, cuda=None):
"""
Multiply a batch of sparse matrices (indices, values, size) with a batch of dense matrices (xmatrix)
:param indices:
:param values:
:param size:
:param xmatrix:
:return:
"""
if cuda is None:
cuda = indices.is_cuda
b, n, r = indices.size()
dv = 'cuda' if cuda else 'cpu'
height, width = size
size = torch.tensor(size, device=dv, dtype=torch.long)
bmult = size[None, None, :].expand(b, n, 2)
m = torch.arange(b, device=dv, dtype=torch.long)[:, None, None].expand(b, n, 2)
bindices = (m * bmult).view(b*n, r) + indices.view(b*n, r)
bfsize = Variable(size * b)
bvalues = values.contiguous().view(-1)
b, w, z = xmatrix.size()
bxmatrix = xmatrix.view(-1, z)
sm = sparsemm(cuda)
result = sm(bindices.t(), bvalues, bfsize, bxmatrix)
return result.view(b, height, -1)
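# Shape note (added for illustration): for a batch of b sparse (height, width)
# matrices given as indices of shape (b, k, 2) and values of shape (b, k), and a
# dense xmatrix of shape (b, width, z), batchmm returns a tensor of shape
# (b, height, z).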
def intlist(tensor):
"""
A slow and stupid way to turn a tensor into an iterable over ints
:param tensor:
:return:
"""
if type(tensor) is list:
return tensor
tensor = tensor.squeeze()
assert len(tensor.size()) == 1
s = tensor.size()[0]
l = [None] * s
for i in range(s):
l[i] = int(tensor[i])
return l
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
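# Illustrative usage sketch for accuracy(): class scores of shape (n, classes)
# and integer labels of shape (n,). The numbers are assumptions for the example.
def _example_accuracy():
    import torch
    output = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    labels = torch.tensor([1, 0, 0])
    return accuracy(output, labels)                       # 2 of 3 correct, ~0.667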
def simple_normalize(indices, values, size, row=True, method='softplus', cuda=torch.cuda.is_available()):
"""
Simple softmax-style normalization with
:param indices:
:param values:
:param size:
:param row:
:return:
"""
epsilon = 1e-7
if method == 'softplus':
values = F.softplus(values)
elif method == 'abs':
values = values.abs()
elif method == 'relu':
values = F.relu(values)
else:
raise Exception(f'Method {method} not recognized')
sums = sum(indices, values, size, row=row)
return (values/(sums + epsilon))
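# Illustrative usage sketch for simple_normalize(): after row-normalization the
# values sharing a row index sum to (roughly) one. Coordinates and values are
# assumptions; the call relies on the sparse sum() helper defined later in this file.
def _example_simple_normalize():
    import torch
    indices = torch.tensor([[0, 0], [0, 1], [1, 1]])      # (nnz, 2): two entries in row 0
    values = torch.tensor([1.0, -2.0, 3.0])
    return simple_normalize(indices, values, (2, 2), row=True, method='softplus')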
# -- stable(ish) softmax
def logsoftmax(indices, values, size, its=10, p=2, method='iteration', row=True, cuda=torch.cuda.is_available()):
"""
Row or column log-softmaxes a sparse matrix (using logsumexp trick)
:param indices:
:param values:
:param size:
:param row:
:return:
"""
epsilon = 1e-7
if method == 'naive':
values = values.exp()
sums = sum(indices, values, size, row=row)
return (values/(sums + epsilon)).log()
if method == 'pnorm':
maxes = rowpnorm(indices, values, size, p=p)
elif method == 'iteration':
maxes = itmax(indices, values, size, its=its, p=p)
else:
raise Exception('Max method {} not recognized'.format(method))
mvalues = torch.exp(values - maxes)
sums = sum(indices, mvalues, size, row=row) # row/column sums
return mvalues.log() - sums.log()
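# Illustrative usage sketch for logsoftmax() with the 'naive' method: exponentiating
# the result recovers row-wise softmax probabilities. The values are assumptions.
def _example_logsoftmax():
    import torch
    indices = torch.tensor([[0, 0], [0, 1], [1, 0]])
    values = torch.tensor([0.5, 1.5, -0.5])
    logp = logsoftmax(indices, values, (2, 2), method='naive')
    return logp.exp()                                     # entries in the same row sum to ~1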
def rowpnorm(indices, values, size, p, row=True):
"""
Row or column p-norms a sparse matrix
:param indices:
:param values:
:param size:
:param row:
:return:
"""
pvalues = torch.pow(values, p)
sums = sum(indices, pvalues, size, row=row)
return torch.pow(sums, 1.0/p)
def itmax(indices, values, size, its=10, p=2, row=True):
"""
Iterative computation of row max
:param indices:
:param values:
:param size:
:param p:
:param row:
:param cuda:
:return:
"""
epsilon = 0.00000001
# create an initial vector with all values made positive
# weights = values - values.min()
weights = F.softplus(values)
weights = weights / (sum(indices, weights, size) + epsilon)
# iterate, weights converges to a one-hot vector
for i in range(its):
weights = weights.pow(p)
sums = sum(indices, weights, size, row=row) # row/column sums
weights = weights/sums
return sum(indices, values * weights, size, row=row)
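# Illustrative usage sketch for itmax(): with enough iterations each nonzero entry
# ends up holding (approximately) the maximum of its row. The values are assumptions.
def _example_itmax():
    import torch
    indices = torch.tensor([[0, 0], [0, 1], [1, 0]])
    values = torch.tensor([1.0, 3.0, 2.0])
    return itmax(indices, values, (2, 2), its=20, p=2)    # approx. [3.0, 3.0, 2.0]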
def sum(indices, values, size, row=True):
"""
Sum the rows or columns of a sparse matrix, and redistribute the
results back to the non-sparse row/column entries
Arguments are interpreted as defining a sparse matrix. Any extra dimensions
are treated as batch dimensions.
:return:
"""
assert len(indices.size()) == len(values.size()) + 1
if len(indices.size()) == 2:
# add batch dim
indices = indices[None, :, :]
values = values[None, :]
bdims = None
else:
# fold up batch dim
bdims = indices.size()[:-2]
k, r = indices.size()[-2:]
assert bdims == values.size()[:-1]
assert values.size()[-1] == k
indices = indices.view(-1, k, r)
values = values.view(-1, k)
b, k, r = indices.size()
if row:
    ones = torch.ones((size[1], 1), device=d(indices))
else:
    ones = torch.ones((size[0], 1), device=d(indices))
    # transpose the matrix for column sums: swap the row and column indices
    indices = torch.cat([indices[:, :, 1:2], indices[:, :, 0:1]], dim=2)
s, _ = ones.size()
ones = ones[None, :, :].expand(b, s, 1).contiguous()
sums = batchmm(indices, values, size, ones) # row/column sums
bindex = torch.arange(b, device=d(indices))[:, None].expand(b, indices.size(1))
sums = sums[bindex, indices[:, :, 0], 0]
if bdims is None:
return sums.view(k)
return sums.view(*bdims + (k,))
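# Illustrative usage sketch for sum(): each nonzero entry receives the sum of its
# row (row=True) or column (row=False). Coordinates and values are assumptions.
def _example_sum():
    import torch
    indices = torch.tensor([[0, 0], [0, 1], [1, 1]])
    values = torch.tensor([1.0, 2.0, 4.0])
    return sum(indices, values, (2, 2), row=True)         # approx. [3.0, 3.0, 4.0]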
| mit |
axbaretto/beam | sdks/python/apache_beam/internal/clients/bigquery/bigquery_v2_messages.py | 19 | 81881 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for bigquery version v2.
A data platform for customers to create, manage, share and query data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigquery'
class BigqueryDatasetsDeleteRequest(_messages.Message):
"""A BigqueryDatasetsDeleteRequest object.
Fields:
datasetId: Dataset ID of dataset being deleted
deleteContents: If True, delete all the tables in the dataset. If False
and the dataset contains tables, the request will fail. Default is False
projectId: Project ID of the dataset being deleted
"""
datasetId = _messages.StringField(1, required=True)
deleteContents = _messages.BooleanField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsDeleteResponse(_messages.Message):
"""An empty BigqueryDatasetsDelete response."""
class BigqueryDatasetsGetRequest(_messages.Message):
"""A BigqueryDatasetsGetRequest object.
Fields:
datasetId: Dataset ID of the requested dataset
projectId: Project ID of the requested dataset
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsInsertRequest(_messages.Message):
"""A BigqueryDatasetsInsertRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
projectId: Project ID of the new dataset
"""
dataset = _messages.MessageField('Dataset', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsListRequest(_messages.Message):
"""A BigqueryDatasetsListRequest object.
Fields:
all: Whether to list all datasets, including hidden ones
maxResults: The maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the datasets to be listed
"""
all = _messages.BooleanField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
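# Illustrative usage sketch (not part of the generated API surface): apitools
# message classes are constructed with keyword arguments. The project ID below
# is a placeholder assumption.
def _example_datasets_list_request():
    return BigqueryDatasetsListRequest(
        projectId='my-project',        # placeholder project ID
        all=True,                      # include hidden datasets
        maxResults=100)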
class BigqueryDatasetsPatchRequest(_messages.Message):
"""A BigqueryDatasetsPatchRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsUpdateRequest(_messages.Message):
"""A BigqueryDatasetsUpdateRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsCancelRequest(_messages.Message):
"""A BigqueryJobsCancelRequest object.
Fields:
jobId: [Required] Job ID of the job to cancel
projectId: [Required] Project ID of the job to cancel
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsGetQueryResultsRequest(_messages.Message):
"""A BigqueryJobsGetQueryResultsRequest object.
Fields:
jobId: [Required] Job ID of the query job
maxResults: Maximum number of results to read
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: [Required] Project ID of the query job
startIndex: Zero-based index of the starting row
timeoutMs: How long to wait for the query to complete, in milliseconds,
before returning. Default is 10 seconds. If the timeout passes before
the job completes, the 'jobComplete' field in the response will be false
"""
jobId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
timeoutMs = _messages.IntegerField(6, variant=_messages.Variant.UINT32)
class BigqueryJobsGetRequest(_messages.Message):
"""A BigqueryJobsGetRequest object.
Fields:
jobId: [Required] Job ID of the requested job
projectId: [Required] Project ID of the requested job
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsInsertRequest(_messages.Message):
"""A BigqueryJobsInsertRequest object.
Fields:
job: A Job resource to be passed as the request body.
projectId: Project ID of the project that will be billed for the job
"""
job = _messages.MessageField('Job', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsListRequest(_messages.Message):
"""A BigqueryJobsListRequest object.
Enums:
ProjectionValueValuesEnum: Restrict information returned to a set of
selected fields
StateFilterValueValuesEnum: Filter for job state
Fields:
allUsers: Whether to display jobs owned by all users in the project.
Default false
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the jobs to list
projection: Restrict information returned to a set of selected fields
stateFilter: Filter for job state
"""
class ProjectionValueValuesEnum(_messages.Enum):
"""Restrict information returned to a set of selected fields
Values:
full: Includes all job data
minimal: Does not include the job configuration
"""
full = 0
minimal = 1
class StateFilterValueValuesEnum(_messages.Enum):
"""Filter for job state
Values:
done: Finished jobs
pending: Pending jobs
running: Running jobs
"""
done = 0
pending = 1
running = 2
allUsers = _messages.BooleanField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
stateFilter = _messages.EnumField('StateFilterValueValuesEnum', 6, repeated=True)
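# Illustrative usage sketch: enum-typed fields take values from the nested enum
# classes defined above. The project ID is a placeholder assumption.
def _example_jobs_list_request():
    return BigqueryJobsListRequest(
        projectId='my-project',
        projection=BigqueryJobsListRequest.ProjectionValueValuesEnum.minimal,
        stateFilter=[BigqueryJobsListRequest.StateFilterValueValuesEnum.pending,
                     BigqueryJobsListRequest.StateFilterValueValuesEnum.running])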
class BigqueryJobsQueryRequest(_messages.Message):
"""A BigqueryJobsQueryRequest object.
Fields:
projectId: Project ID of the project billed for the query
queryRequest: A QueryRequest resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
queryRequest = _messages.MessageField('QueryRequest', 2)
class BigqueryProjectsListRequest(_messages.Message):
"""A BigqueryProjectsListRequest object.
Fields:
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
class BigqueryTabledataInsertAllRequest(_messages.Message):
"""A BigqueryTabledataInsertAllRequest object.
Fields:
datasetId: Dataset ID of the destination table.
projectId: Project ID of the destination table.
tableDataInsertAllRequest: A TableDataInsertAllRequest resource to be
passed as the request body.
tableId: Table ID of the destination table.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableDataInsertAllRequest = _messages.MessageField('TableDataInsertAllRequest', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTabledataListRequest(_messages.Message):
"""A BigqueryTabledataListRequest object.
Fields:
datasetId: Dataset ID of the table to read
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, identifying the result
set
projectId: Project ID of the table to read
startIndex: Zero-based index of the starting row to read
tableId: Table ID of the table to read
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
tableId = _messages.StringField(6, required=True)
class BigqueryTablesDeleteRequest(_messages.Message):
"""A BigqueryTablesDeleteRequest object.
Fields:
datasetId: Dataset ID of the table to delete
projectId: Project ID of the table to delete
tableId: Table ID of the table to delete
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesDeleteResponse(_messages.Message):
"""An empty BigqueryTablesDelete response."""
class BigqueryTablesGetRequest(_messages.Message):
"""A BigqueryTablesGetRequest object.
Fields:
datasetId: Dataset ID of the requested table
projectId: Project ID of the requested table
tableId: Table ID of the requested table
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesInsertRequest(_messages.Message):
"""A BigqueryTablesInsertRequest object.
Fields:
datasetId: Dataset ID of the new table
projectId: Project ID of the new table
table: A Table resource to be passed as the request body.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
class BigqueryTablesListRequest(_messages.Message):
"""A BigqueryTablesListRequest object.
Fields:
datasetId: Dataset ID of the tables to list
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the tables to list
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class BigqueryTablesPatchRequest(_messages.Message):
"""A BigqueryTablesPatchRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTablesUpdateRequest(_messages.Message):
"""A BigqueryTablesUpdateRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigtableColumn(_messages.Message):
"""A BigtableColumn object.
Fields:
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. 'encoding' can also be set at
the column family level. However, the setting at this level takes
precedence if 'encoding' is set at both levels.
fieldName: [Optional] If the qualifier is not a valid BigQuery field
identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier
must be provided as the column field name and is used as field name in
queries.
onlyReadLatest: [Optional] If this is set, only the latest version of
value in this column are exposed. 'onlyReadLatest' can also be set at
the column family level. However, the setting at this level takes
precedence if 'onlyReadLatest' is set at both levels.
qualifierEncoded: [Required] Qualifier of the column. Columns in the
parent column family that has this exact qualifier are exposed as .
field. If the qualifier is a valid UTF-8 string, it can be specified in
the qualifier_string field. Otherwise, a base-64 encoded value must be
set to qualifier_encoded. The column field name is the same as the
column qualifier. However, if the qualifier is not a valid BigQuery
field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid
identifier must be provided as field_name.
qualifierString: A string attribute.
type: [Optional] The type to convert the value in cells of this column.
The values are expected to be encoded using HBase Bytes.toBytes function
when using the BINARY encoding value. Following BigQuery types are
allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
type is BYTES. 'type' can also be set at the column family level.
However, the setting at this level takes precedence if 'type' is set at
both levels.
"""
encoding = _messages.StringField(1)
fieldName = _messages.StringField(2)
onlyReadLatest = _messages.BooleanField(3)
qualifierEncoded = _messages.BytesField(4)
qualifierString = _messages.StringField(5)
type = _messages.StringField(6)
class BigtableColumnFamily(_messages.Message):
"""A BigtableColumnFamily object.
Fields:
columns: [Optional] Lists of columns that should be exposed as individual
fields as opposed to a list of (column name, value) pairs. All columns
whose qualifier matches a qualifier in this list can be accessed as ..
Other columns can be accessed as a list through .Column field.
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. This can be overridden for a
specific column by listing that column in 'columns' and specifying an
encoding for it.
familyId: Identifier of the column family.
onlyReadLatest: [Optional] If this is set only the latest version of value
are exposed for all columns in this column family. This can be
overridden for a specific column by listing that column in 'columns' and
specifying a different setting for that column.
type: [Optional] The type to convert the value in cells of this column
family. The values are expected to be encoded using HBase Bytes.toBytes
function when using the BINARY encoding value. Following BigQuery types
are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
type is BYTES. This can be overridden for a specific column by listing
that column in 'columns' and specifying a type for it.
"""
columns = _messages.MessageField('BigtableColumn', 1, repeated=True)
encoding = _messages.StringField(2)
familyId = _messages.StringField(3)
onlyReadLatest = _messages.BooleanField(4)
type = _messages.StringField(5)
class BigtableOptions(_messages.Message):
"""A BigtableOptions object.
Fields:
columnFamilies: [Optional] List of column families to expose in the table
schema along with their types. This list restricts the column families
that can be referenced in queries and specifies their value types. You
can use this list to do type conversions - see the 'type' field for more
details. If you leave this list empty, all column families are present
in the table schema and their values are read as BYTES. During a query
only the column families referenced in that query are read from
Bigtable.
ignoreUnspecifiedColumnFamilies: [Optional] If field is true, then the
column families that are not specified in columnFamilies list are not
exposed in the table schema. Otherwise, they are read with BYTES type
values. The default value is false.
"""
columnFamilies = _messages.MessageField('BigtableColumnFamily', 1, repeated=True)
ignoreUnspecifiedColumnFamilies = _messages.BooleanField(2)
class CsvOptions(_messages.Message):
"""A CsvOptions object.
Fields:
allowJaggedRows: [Optional] Indicates if BigQuery should accept rows that
are missing trailing optional columns. If true, BigQuery treats missing
trailing columns as null values. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. The default
value is false.
allowQuotedNewlines: [Optional] Indicates if BigQuery should allow quoted
data sections that contain newline characters in a CSV file. The default
value is false.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when reading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
encoding = _messages.StringField(3)
fieldDelimiter = _messages.StringField(4)
quote = _messages.StringField(5, default=u'"')
skipLeadingRows = _messages.IntegerField(6, variant=_messages.Variant.INT32)
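# Illustrative usage sketch: CsvOptions for a comma-delimited file with a single
# header row. The field values are assumptions chosen for the example.
def _example_csv_options():
    return CsvOptions(
        fieldDelimiter=',',
        skipLeadingRows=1,             # skip the header row
        allowQuotedNewlines=True)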
class Dataset(_messages.Message):
"""A Dataset object.
Messages:
AccessValueListEntry: A AccessValueListEntry object.
Fields:
access: [Optional] An array of objects that define dataset access for one
or more entities. You can set this property when inserting or updating a
dataset in order to control who is allowed to access the data. If
unspecified at dataset creation time, BigQuery adds default dataset
access for the following entities: access.specialGroup: projectReaders;
access.role: READER; access.specialGroup: projectWriters; access.role:
WRITER; access.specialGroup: projectOwners; access.role: OWNER;
access.userByEmail: [dataset creator email]; access.role: OWNER;
creationTime: [Output-only] The time when this dataset was created, in
milliseconds since the epoch.
datasetReference: [Required] A reference that identifies the dataset.
defaultTableExpirationMs: [Optional] The default lifetime of all tables in
the dataset, in milliseconds. The minimum value is 3600000 milliseconds
(one hour). Once this property is set, all newly-created tables in the
dataset will have an expirationTime property set to the creation time
plus the value in this property, and changing the value will only affect
new tables, not existing ones. When the expirationTime for a given table
is reached, that table will be deleted automatically. If a table's
expirationTime is modified or removed before the table expires, or if
you provide an explicit expirationTime when creating a table, that value
takes precedence over the default expiration time indicated by this
property.
description: [Optional] A user-friendly description of the dataset.
etag: [Output-only] A hash of the resource.
friendlyName: [Optional] A descriptive name for the dataset.
id: [Output-only] The fully-qualified unique name of the dataset in the
format projectId:datasetId. The dataset name without the project name is
given in the datasetId field. When creating a new dataset, leave this
field blank, and instead specify the datasetId field.
kind: [Output-only] The resource type.
lastModifiedTime: [Output-only] The date when this dataset or any of its
tables was last modified, in milliseconds since the epoch.
location: [Experimental] The geographic location where the dataset should
reside. Possible values include EU and US. The default value is US.
selfLink: [Output-only] A URL that can be used to access the resource
again. You can use this URL in Get or Update requests to the resource.
"""
class AccessValueListEntry(_messages.Message):
"""A AccessValueListEntry object.
Fields:
domain: [Pick one] A domain to grant access to. Any users signed in with
the domain specified will be granted the specified access. Example:
"example.com".
groupByEmail: [Pick one] An email address of a Google Group to grant
access to.
role: [Required] Describes the rights granted to the user specified by
the other member of the access object. The following string values are
supported: READER, WRITER, OWNER.
specialGroup: [Pick one] A special group to grant access to. Possible
values include: projectOwners: Owners of the enclosing project.
projectReaders: Readers of the enclosing project. projectWriters:
Writers of the enclosing project. allAuthenticatedUsers: All
authenticated BigQuery users.
userByEmail: [Pick one] An email address of a user to grant access to.
For example: [email protected].
view: [Pick one] A view from a different dataset to grant access to.
Queries executed against that view will have read access to tables in
this dataset. The role field is not required when this field is set.
If that view is updated by any user, access to the view needs to be
granted again via an update operation.
"""
domain = _messages.StringField(1)
groupByEmail = _messages.StringField(2)
role = _messages.StringField(3)
specialGroup = _messages.StringField(4)
userByEmail = _messages.StringField(5)
view = _messages.MessageField('TableReference', 6)
access = _messages.MessageField('AccessValueListEntry', 1, repeated=True)
creationTime = _messages.IntegerField(2)
datasetReference = _messages.MessageField('DatasetReference', 3)
defaultTableExpirationMs = _messages.IntegerField(4)
description = _messages.StringField(5)
etag = _messages.StringField(6)
friendlyName = _messages.StringField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default=u'bigquery#dataset')
lastModifiedTime = _messages.IntegerField(10)
location = _messages.StringField(11)
selfLink = _messages.StringField(12)
class DatasetList(_messages.Message):
"""A DatasetList object.
Messages:
DatasetsValueListEntry: A DatasetsValueListEntry object.
Fields:
datasets: An array of the dataset resources in the project. Each resource
contains basic information. For full information about a particular
dataset resource, use the Datasets: get method. This property is omitted
when there are no datasets in the project.
etag: A hash value of the results page. You can use this property to
determine if the page has changed since the last request.
kind: The list type. This property always returns the value
"bigquery#datasetList".
nextPageToken: A token that can be used to request the next results page.
This property is omitted on the final results page.
"""
class DatasetsValueListEntry(_messages.Message):
"""A DatasetsValueListEntry object.
Fields:
datasetReference: The dataset reference. Use this property to access
specific parts of the dataset's ID, such as project ID or dataset ID.
friendlyName: A descriptive name for the dataset, if one exists.
id: The fully-qualified, unique, opaque ID of the dataset.
kind: The resource type. This property always returns the value
"bigquery#dataset".
"""
datasetReference = _messages.MessageField('DatasetReference', 1)
friendlyName = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'bigquery#dataset')
datasets = _messages.MessageField('DatasetsValueListEntry', 1, repeated=True)
etag = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#datasetList')
nextPageToken = _messages.StringField(4)
class DatasetReference(_messages.Message):
"""A DatasetReference object.
Fields:
datasetId: [Required] A unique ID for this dataset, without the project
name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or
underscores (_). The maximum length is 1,024 characters.
projectId: [Optional] The ID of the project containing this dataset.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
class ErrorProto(_messages.Message):
"""A ErrorProto object.
Fields:
debugInfo: Debugging information. This property is internal to Google and
should not be used.
location: Specifies where the error occurred, if present.
message: A human-readable description of the error.
reason: A short error code that summarizes the error.
"""
debugInfo = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
reason = _messages.StringField(4)
class ExplainQueryStage(_messages.Message):
"""A ExplainQueryStage object.
Fields:
computeRatioAvg: Relative amount of time the average shard spent on CPU-
bound tasks.
computeRatioMax: Relative amount of time the slowest shard spent on CPU-
bound tasks.
id: Unique ID for stage within plan.
name: Human-readable name for stage.
readRatioAvg: Relative amount of time the average shard spent reading
input.
readRatioMax: Relative amount of time the slowest shard spent reading
input.
recordsRead: Number of records read into the stage.
recordsWritten: Number of records written by the stage.
steps: List of operations within the stage in dependency order
(approximately chronological).
waitRatioAvg: Relative amount of time the average shard spent waiting to
be scheduled.
waitRatioMax: Relative amount of time the slowest shard spent waiting to
be scheduled.
writeRatioAvg: Relative amount of time the average shard spent on writing
output.
writeRatioMax: Relative amount of time the slowest shard spent on writing
output.
"""
computeRatioAvg = _messages.FloatField(1)
computeRatioMax = _messages.FloatField(2)
id = _messages.IntegerField(3)
name = _messages.StringField(4)
readRatioAvg = _messages.FloatField(5)
readRatioMax = _messages.FloatField(6)
recordsRead = _messages.IntegerField(7)
recordsWritten = _messages.IntegerField(8)
steps = _messages.MessageField('ExplainQueryStep', 9, repeated=True)
waitRatioAvg = _messages.FloatField(10)
waitRatioMax = _messages.FloatField(11)
writeRatioAvg = _messages.FloatField(12)
writeRatioMax = _messages.FloatField(13)
class ExplainQueryStep(_messages.Message):
"""A ExplainQueryStep object.
Fields:
kind: Machine-readable operation type.
substeps: Human-readable stage descriptions.
"""
kind = _messages.StringField(1)
substeps = _messages.StringField(2, repeated=True)
class ExternalDataConfiguration(_messages.Message):
"""A ExternalDataConfiguration object.
Fields:
autodetect: [Experimental] Try to detect schema and format options
automatically. Any option specified explicitly will be honored.
bigtableOptions: [Optional] Additional options if sourceFormat is set to
BIGTABLE.
compression: [Optional] The compression type of the data source. Possible
values include GZIP and NONE. The default value is NONE. This setting is
ignored for Google Cloud Bigtable, Google Cloud Datastore backups and
Avro formats.
csvOptions: Additional properties to set if sourceFormat is set to CSV.
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore
backups: This setting is ignored. Avro: This setting is ignored.
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when reading data. If the number of bad records exceeds this
value, an invalid error is returned in the job result. The default value
is 0, which requires that all records are valid. This setting is ignored
for Google Cloud Bigtable, Google Cloud Datastore backups and Avro
formats.
schema: [Optional] The schema for the data. Schema is required for CSV and
JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud
Datastore backups, and Avro formats.
sourceFormat: [Required] The data format. For CSV files, specify "CSV".
For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro
files, specify "AVRO". For Google Cloud Datastore backups, specify
"DATASTORE_BACKUP". [Experimental] For Google Cloud Bigtable, specify
"BIGTABLE". Please note that reading from Google Cloud Bigtable is
experimental and has to be enabled for your project. Please contact
Google Cloud Support to enable this for your project.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
'*' wildcard character and it must come after the 'bucket' name. Size
limits related to load jobs apply to external data sources, plus an
additional limit of 10 GB maximum size across all URIs. For Google Cloud
Bigtable URIs: Exactly one URI can be specified and it has to be a fully
specified and valid HTTPS URL for a Google Cloud Bigtable table. For
Google Cloud Datastore backups, exactly one URI can be specified, and it
must end with '.backup_info'. Also, the '*' wildcard character is not
allowed.
"""
autodetect = _messages.BooleanField(1)
bigtableOptions = _messages.MessageField('BigtableOptions', 2)
compression = _messages.StringField(3)
csvOptions = _messages.MessageField('CsvOptions', 4)
ignoreUnknownValues = _messages.BooleanField(5)
maxBadRecords = _messages.IntegerField(6, variant=_messages.Variant.INT32)
schema = _messages.MessageField('TableSchema', 7)
sourceFormat = _messages.StringField(8)
sourceUris = _messages.StringField(9, repeated=True)
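# Illustrative usage sketch: an external CSV data source backed by Cloud Storage.
# The bucket path is a placeholder assumption.
def _example_external_data_configuration():
    return ExternalDataConfiguration(
        sourceFormat='CSV',
        sourceUris=['gs://my-bucket/data/*.csv'],   # placeholder URI
        csvOptions=CsvOptions(skipLeadingRows=1),
        maxBadRecords=10)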
class GetQueryResultsResponse(_messages.Message):
"""A GetQueryResultsResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
etag: A hash of this response.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the BigQuery Job that was created to run the
query. This field will be present even if the original request timed
out, in which case GetQueryResults can be used to read the results once
the query has completed. Since this API only returns the first page of
results, subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type of the response.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above. Present
only when the query completes successfully.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results. Present only when the query completes successfully.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
etag = _messages.StringField(3)
jobComplete = _messages.BooleanField(4)
jobReference = _messages.MessageField('JobReference', 5)
kind = _messages.StringField(6, default=u'bigquery#getQueryResultsResponse')
pageToken = _messages.StringField(7)
rows = _messages.MessageField('TableRow', 8, repeated=True)
schema = _messages.MessageField('TableSchema', 9)
totalBytesProcessed = _messages.IntegerField(10)
totalRows = _messages.IntegerField(11, variant=_messages.Variant.UINT64)
class IntervalPartitionConfiguration(_messages.Message):
"""A IntervalPartitionConfiguration object.
Fields:
expirationMs: A string attribute.
type: A string attribute.
"""
expirationMs = _messages.IntegerField(1)
type = _messages.StringField(2)
class Job(_messages.Message):
"""A Job object.
Fields:
configuration: [Required] Describes the job configuration.
etag: [Output-only] A hash of this resource.
id: [Output-only] Opaque ID field of the job
jobReference: [Optional] Reference describing the unique-per-user name of
the job.
kind: [Output-only] The type of the resource.
selfLink: [Output-only] A URL that can be used to access this resource
again.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Output-only] The status of this job. Examine this value when
polling an asynchronous job to see if the job is complete.
user_email: [Output-only] Email address of the user who ran the job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
selfLink = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
class JobCancelResponse(_messages.Message):
"""A JobCancelResponse object.
Fields:
job: The final state of the job.
kind: The resource type of the response.
"""
job = _messages.MessageField('Job', 1)
kind = _messages.StringField(2, default=u'bigquery#jobCancelResponse')
class JobConfiguration(_messages.Message):
"""A JobConfiguration object.
Fields:
copy: [Pick one] Copies a table.
dryRun: [Optional] If set, don't actually run this job. A valid query will
return a mostly empty response with some processing statistics, while an
invalid query will return the same error it would if it wasn't a dry
run. Behavior of non-query jobs is undefined.
extract: [Pick one] Configures an extract job.
load: [Pick one] Configures a load job.
query: [Pick one] Configures a query job.
"""
copy = _messages.MessageField('JobConfigurationTableCopy', 1)
dryRun = _messages.BooleanField(2)
extract = _messages.MessageField('JobConfigurationExtract', 3)
load = _messages.MessageField('JobConfigurationLoad', 4)
query = _messages.MessageField('JobConfigurationQuery', 5)
class JobConfigurationExtract(_messages.Message):
"""A JobConfigurationExtract object.
Fields:
compression: [Optional] The compression type to use for exported files.
Possible values include GZIP and NONE. The default value is NONE.
destinationFormat: [Optional] The exported file format. Possible values
include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV.
Tables with nested or repeated fields cannot be exported as CSV.
destinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
passing only one URI as necessary. The fully-qualified Google Cloud
Storage URI where the extracted table should be written.
destinationUris: [Pick one] A list of fully-qualified Google Cloud Storage
URIs where the extracted table should be written.
fieldDelimiter: [Optional] Delimiter to use between fields in the exported
data. Default is ','
printHeader: [Optional] Whether to print out a header row in the results.
Default is true.
sourceTable: [Required] A reference to the table being exported.
"""
compression = _messages.StringField(1)
destinationFormat = _messages.StringField(2)
destinationUri = _messages.StringField(3)
destinationUris = _messages.StringField(4, repeated=True)
fieldDelimiter = _messages.StringField(5)
printHeader = _messages.BooleanField(6, default=True)
sourceTable = _messages.MessageField('TableReference', 7)
class JobConfigurationLoad(_messages.Message):
"""A JobConfigurationLoad object.
Fields:
allowJaggedRows: [Optional] Accept rows that are missing trailing optional
columns. The missing values are treated as nulls. If false, records with
missing trailing columns are treated as bad records, and if there are
too many bad records, an invalid error is returned in the job result.
The default value is false. Only applicable to CSV, ignored for other
formats.
allowQuotedNewlines: Indicates if BigQuery should allow quoted data
sections that contain newline characters in a CSV file. The default
value is false.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table to load the data into.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file. The
separator can be any ISO-8859-1 single-byte character. To use a
character in the range 128-255, you must encode the character as UTF8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when running the job. If the number of bad records exceeds
this value, an invalid error is returned in the job result. The default
value is 0, which requires that all records are valid.
projectionFields: [Experimental] If sourceFormat is set to
"DATASTORE_BACKUP", indicates which entity properties to load into
BigQuery from a Cloud Datastore backup. Property names are case
sensitive and must be top-level properties. If no properties are
specified, BigQuery loads all properties. If any named property isn't
found in the Cloud Datastore backup, an invalid error is returned in the
job result.
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
schema: [Optional] The schema for the destination table. The schema can be
omitted if the destination table already exists, or if you're loading
data from Google Cloud Datastore.
schemaInline: [Deprecated] The inline schema. For CSV schemas, specify as
"Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
baz:FLOAT".
schemaInlineFormat: [Deprecated] The format of the schemaInline property.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when loading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
sourceFormat: [Optional] The format of the data files. For CSV files,
specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
specify "AVRO". The default value is CSV.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud Storage. Each URI can contain one '*' wildcard character
and it must come after the 'bucket' name.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_APPEND. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
createDisposition = _messages.StringField(3)
destinationTable = _messages.MessageField('TableReference', 4)
encoding = _messages.StringField(5)
fieldDelimiter = _messages.StringField(6)
ignoreUnknownValues = _messages.BooleanField(7)
maxBadRecords = _messages.IntegerField(8, variant=_messages.Variant.INT32)
projectionFields = _messages.StringField(9, repeated=True)
quote = _messages.StringField(10, default=u'"')
schema = _messages.MessageField('TableSchema', 11)
schemaInline = _messages.StringField(12)
schemaInlineFormat = _messages.StringField(13)
skipLeadingRows = _messages.IntegerField(14, variant=_messages.Variant.INT32)
sourceFormat = _messages.StringField(15)
sourceUris = _messages.StringField(16, repeated=True)
writeDisposition = _messages.StringField(17)
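# Illustrative usage sketch: a load job configuration that appends CSV data from
# Cloud Storage to an existing table. Project, dataset, table and bucket names
# are placeholder assumptions; TableReference is defined later in this module.
def _example_load_configuration():
    return JobConfigurationLoad(
        destinationTable=TableReference(
            projectId='my-project', datasetId='my_dataset', tableId='my_table'),
        sourceUris=['gs://my-bucket/data/*.csv'],
        sourceFormat='CSV',
        skipLeadingRows=1,
        writeDisposition='WRITE_APPEND')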
class JobConfigurationQuery(_messages.Message):
"""A JobConfigurationQuery object.
Messages:
TableDefinitionsValue: [Optional] If querying an external data source
outside of BigQuery, describes the data format, location and other
properties of the data source. By defining these properties, the data
source can then be queried as if it were a standard BigQuery table.
Fields:
allowLargeResults: If true, allows the query to produce arbitrarily large
result tables at a slight cost in performance. Requires destinationTable
to be set.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
defaultDataset: [Optional] Specifies the default dataset to use for
unqualified table names in the query.
destinationTable: [Optional] Describes the table where the query results
should be stored. If not present, a new table will be created to store
the results.
flattenResults: [Optional] Flattens all nested and repeated fields in the
query results. The default value is true. allowLargeResults must be true
if this is set to false.
maximumBillingTier: [Optional] Limits the billing tier for this job.
Queries that have resource usage beyond this tier will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
preserveNulls: [Deprecated] This property is deprecated.
priority: [Optional] Specifies a priority for the query. Possible values
include INTERACTIVE and BATCH. The default value is INTERACTIVE.
query: [Required] BigQuery SQL query to execute.
tableDefinitions: [Optional] If querying an external data source outside
of BigQuery, describes the data format, location and other properties of
the data source. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
useLegacySql: [Experimental] Specifies whether to use BigQuery's legacy
SQL dialect for this query. The default value is true. If set to false,
the query will use BigQuery's updated SQL dialect with improved
standards compliance. When using BigQuery's updated SQL, the values of
allowLargeResults and flattenResults are ignored. Queries with
useLegacySql set to false will be run as if allowLargeResults is true
and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. Moreover, the query cache is
only available when a query does not have a destination table specified.
The default value is true.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class TableDefinitionsValue(_messages.Message):
"""[Optional] If querying an external data source outside of BigQuery,
describes the data format, location and other properties of the data
source. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
Messages:
AdditionalProperty: An additional property for a TableDefinitionsValue
object.
Fields:
additionalProperties: Additional properties of type
TableDefinitionsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a TableDefinitionsValue object.
Fields:
key: Name of the additional property.
value: A ExternalDataConfiguration attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('ExternalDataConfiguration', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allowLargeResults = _messages.BooleanField(1)
createDisposition = _messages.StringField(2)
defaultDataset = _messages.MessageField('DatasetReference', 3)
destinationTable = _messages.MessageField('TableReference', 4)
flattenResults = _messages.BooleanField(5, default=True)
maximumBillingTier = _messages.IntegerField(6, variant=_messages.Variant.INT32, default=1)
preserveNulls = _messages.BooleanField(7)
priority = _messages.StringField(8)
query = _messages.StringField(9)
tableDefinitions = _messages.MessageField('TableDefinitionsValue', 10)
useLegacySql = _messages.BooleanField(11)
useQueryCache = _messages.BooleanField(12, default=True)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 13, repeated=True)
writeDisposition = _messages.StringField(14)
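# Illustrative usage sketch: a batch-priority query job configuration. The SQL
# string, project and dataset names are placeholder assumptions.
def _example_query_configuration():
    return JobConfigurationQuery(
        query='SELECT word, COUNT(*) AS n FROM my_dataset.my_table GROUP BY word',
        defaultDataset=DatasetReference(projectId='my-project', datasetId='my_dataset'),
        useLegacySql=False,
        priority='BATCH')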
class JobConfigurationTableCopy(_messages.Message):
"""A JobConfigurationTableCopy object.
Fields:
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table
sourceTable: [Pick one] Source table to copy.
sourceTables: [Pick one] Source tables to copy.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
createDisposition = _messages.StringField(1)
destinationTable = _messages.MessageField('TableReference', 2)
sourceTable = _messages.MessageField('TableReference', 3)
sourceTables = _messages.MessageField('TableReference', 4, repeated=True)
writeDisposition = _messages.StringField(5)
class JobList(_messages.Message):
"""A JobList object.
Messages:
JobsValueListEntry: A JobsValueListEntry object.
Fields:
etag: A hash of this page of results.
jobs: List of jobs that were requested.
kind: The resource type of the response.
nextPageToken: A token to request the next page of results.
"""
class JobsValueListEntry(_messages.Message):
"""A JobsValueListEntry object.
Fields:
configuration: [Full-projection-only] Specifies the job configuration.
errorResult: A result object that will be present only if the job has
failed.
id: Unique opaque ID of the job.
jobReference: Job reference uniquely identifying the job.
kind: The resource type.
state: Running state of the job. When the state is DONE, errorResult can
be checked to determine whether the job succeeded or failed.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Full-projection-only] Describes the state of the job.
user_email: [Full-projection-only] Email address of the user who ran the
job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
errorResult = _messages.MessageField('ErrorProto', 2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
state = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
etag = _messages.StringField(1)
jobs = _messages.MessageField('JobsValueListEntry', 2, repeated=True)
kind = _messages.StringField(3, default=u'bigquery#jobList')
nextPageToken = _messages.StringField(4)
class JobReference(_messages.Message):
"""A JobReference object.
Fields:
jobId: [Required] The ID of the job. The ID must contain only letters
(a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
length is 1,024 characters.
projectId: [Required] The ID of the project containing this job.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
class JobStatistics(_messages.Message):
"""A JobStatistics object.
Fields:
creationTime: [Output-only] Creation time of this job, in milliseconds
since the epoch. This field will be present on all jobs.
endTime: [Output-only] End time of this job, in milliseconds since the
epoch. This field will be present whenever a job is in the DONE state.
extract: [Output-only] Statistics for an extract job.
load: [Output-only] Statistics for a load job.
query: [Output-only] Statistics for a query job.
startTime: [Output-only] Start time of this job, in milliseconds since the
epoch. This field will be present when the job transitions from the
PENDING state to either RUNNING or DONE.
totalBytesProcessed: [Output-only] [Deprecated] Use the bytes processed in
the query statistics instead.
"""
creationTime = _messages.IntegerField(1)
endTime = _messages.IntegerField(2)
extract = _messages.MessageField('JobStatistics4', 3)
load = _messages.MessageField('JobStatistics3', 4)
query = _messages.MessageField('JobStatistics2', 5)
startTime = _messages.IntegerField(6)
totalBytesProcessed = _messages.IntegerField(7)
class JobStatistics2(_messages.Message):
"""A JobStatistics2 object.
Fields:
billingTier: [Output-only] Billing tier for the job.
cacheHit: [Output-only] Whether the query result was fetched from the
query cache.
queryPlan: [Output-only, Experimental] Describes execution plan for the
query as a list of stages.
referencedTables: [Output-only, Experimental] Referenced tables for the
job. Queries that reference more than 50 tables will not have a complete
list.
totalBytesBilled: [Output-only] Total bytes billed for the job.
totalBytesProcessed: [Output-only] Total bytes processed for the job.
"""
billingTier = _messages.IntegerField(1, variant=_messages.Variant.INT32)
cacheHit = _messages.BooleanField(2)
queryPlan = _messages.MessageField('ExplainQueryStage', 3, repeated=True)
referencedTables = _messages.MessageField('TableReference', 4, repeated=True)
totalBytesBilled = _messages.IntegerField(5)
totalBytesProcessed = _messages.IntegerField(6)
class JobStatistics3(_messages.Message):
"""A JobStatistics3 object.
Fields:
inputFileBytes: [Output-only] Number of bytes of source data in a load
job.
inputFiles: [Output-only] Number of source files in a load job.
outputBytes: [Output-only] Size of the loaded data in bytes. Note that
while a load job is in the running state, this value may change.
outputRows: [Output-only] Number of rows imported in a load job. Note that
while an import job is in the running state, this value may change.
"""
inputFileBytes = _messages.IntegerField(1)
inputFiles = _messages.IntegerField(2)
outputBytes = _messages.IntegerField(3)
outputRows = _messages.IntegerField(4)
class JobStatistics4(_messages.Message):
"""A JobStatistics4 object.
Fields:
destinationUriFileCounts: [Output-only] Number of files per destination
URI or URI pattern specified in the extract configuration. These values
will be in the same order as the URIs specified in the 'destinationUris'
field.
"""
destinationUriFileCounts = _messages.IntegerField(1, repeated=True)
class JobStatus(_messages.Message):
"""A JobStatus object.
Fields:
errorResult: [Output-only] Final error result of the job. If present,
indicates that the job has completed and was unsuccessful.
errors: [Output-only] All errors encountered during the running of the
job. Errors here do not necessarily mean that the job has completed or
was unsuccessful.
state: [Output-only] Running state of the job.
"""
errorResult = _messages.MessageField('ErrorProto', 1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
state = _messages.StringField(3)
@encoding.MapUnrecognizedFields('additionalProperties')
class JsonObject(_messages.Message):
"""Represents a single JSON object.
Messages:
AdditionalProperty: An additional property for a JsonObject object.
Fields:
additionalProperties: Additional properties of type JsonObject
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a JsonObject object.
Fields:
key: Name of the additional property.
value: A JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
JsonValue = extra_types.JsonValue
class ProjectList(_messages.Message):
"""A ProjectList object.
Messages:
ProjectsValueListEntry: A ProjectsValueListEntry object.
Fields:
etag: A hash of the page of results
kind: The type of list.
nextPageToken: A token to request the next page of results.
projects: Projects to which you have at least READ access.
totalItems: The total number of projects in the list.
"""
class ProjectsValueListEntry(_messages.Message):
"""A ProjectsValueListEntry object.
Fields:
friendlyName: A descriptive name for this project.
id: An opaque ID of this project.
kind: The resource type.
numericId: The numeric ID of this project.
projectReference: A unique reference to this project.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#project')
numericId = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
projectReference = _messages.MessageField('ProjectReference', 5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#projectList')
nextPageToken = _messages.StringField(3)
projects = _messages.MessageField('ProjectsValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ProjectReference(_messages.Message):
"""A ProjectReference object.
Fields:
projectId: [Required] ID of the project. Can be either the numeric ID or
the assigned ID of the project.
"""
projectId = _messages.StringField(1)
class QueryRequest(_messages.Message):
"""A QueryRequest object.
Fields:
defaultDataset: [Optional] Specifies the default datasetId and projectId
to assume for any unqualified table names in the query. If not set, all
table names in the query string must be qualified in the format
'datasetId.tableId'.
dryRun: [Optional] If set to true, BigQuery doesn't run the job. Instead,
if the query is valid, BigQuery returns statistics about the job such as
how many bytes would be processed. If the query is invalid, an error
returns. The default value is false.
kind: The resource type of the request.
maxResults: [Optional] The maximum number of rows of data to return per
page of results. Setting this flag to a small value such as 1000 and
then paging through results might improve reliability when the query
result set is large. In addition to this limit, responses are also
limited to 10 MB. By default, there is no maximum row count, and only
the byte limit applies.
preserveNulls: [Deprecated] This property is deprecated.
query: [Required] A query string, following the BigQuery query syntax, of
the query to execute. Example: "SELECT count(f1) FROM
[myProjectId:myDatasetId.myTableId]".
timeoutMs: [Optional] How long to wait for the query to complete, in
milliseconds, before the request times out and returns. Note that this
is only a timeout for the request, not the query. If the query takes
longer to run than the timeout value, the call returns without any
results and with the 'jobComplete' flag set to false. You can call
GetQueryResults() to wait for the query to complete and read the
results. The default value is 10000 milliseconds (10 seconds).
useLegacySql: [Experimental] Specifies whether to use BigQuery's legacy
SQL dialect for this query. The default value is true. If set to false,
the query will use BigQuery's updated SQL dialect with improved
standards compliance. When using BigQuery's updated SQL, the values of
allowLargeResults and flattenResults are ignored. Queries with
useLegacySql set to false will be run as if allowLargeResults is true
and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. The default value is true.
"""
defaultDataset = _messages.MessageField('DatasetReference', 1)
dryRun = _messages.BooleanField(2)
kind = _messages.StringField(3, default=u'bigquery#queryRequest')
maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
preserveNulls = _messages.BooleanField(5)
query = _messages.StringField(6)
timeoutMs = _messages.IntegerField(7, variant=_messages.Variant.UINT32)
useLegacySql = _messages.BooleanField(8)
useQueryCache = _messages.BooleanField(9, default=True)
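# Illustrative sketch (not part of the generated API): protorpc/apitools
# message classes accept their fields as keyword arguments, so a synchronous
# query request for the jobs.query endpoint can be assembled as below. The
# query string and the numeric limits are placeholder values.
def _example_build_query_request():
    """Hypothetical helper showing how QueryRequest fields fit together."""
    return QueryRequest(
        query='SELECT COUNT(f1) FROM [myProjectId:myDatasetId.myTableId]',
        maxResults=100,       # rows per page in the initial response
        timeoutMs=10000,      # give up waiting after 10s (jobComplete=False)
        useQueryCache=True,   # allow best-effort cache hits
        dryRun=False,         # set True to only estimate bytes processed
    )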
class QueryResponse(_messages.Message):
"""A QueryResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the Job that was created to run the query. This
field will be present even if the original request timed out, in which
case GetQueryResults can be used to read the results once the query has
completed. Since this API only returns the first page of results,
subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
If this query was a dry run, this is the number of bytes that would be
processed if the query were run.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
jobComplete = _messages.BooleanField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#queryResponse')
pageToken = _messages.StringField(6)
rows = _messages.MessageField('TableRow', 7, repeated=True)
schema = _messages.MessageField('TableSchema', 8)
totalBytesProcessed = _messages.IntegerField(9)
totalRows = _messages.IntegerField(10, variant=_messages.Variant.UINT64)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
csv: Responses with Content-Type of text/csv
json: Responses with Content-Type of application/json
"""
csv = 0
json = 1
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class Streamingbuffer(_messages.Message):
"""A Streamingbuffer object.
Fields:
estimatedBytes: [Output-only] A lower-bound estimate of the number of
bytes currently in the streaming buffer.
estimatedRows: [Output-only] A lower-bound estimate of the number of rows
currently in the streaming buffer.
oldestEntryTime: [Output-only] Contains the timestamp of the oldest entry
in the streaming buffer, in milliseconds since the epoch, if the
streaming buffer is available.
"""
estimatedBytes = _messages.IntegerField(1, variant=_messages.Variant.UINT64)
estimatedRows = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
oldestEntryTime = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class Table(_messages.Message):
"""A Table object.
Fields:
creationTime: [Output-only] The time when this table was created, in
milliseconds since the epoch.
description: [Optional] A user-friendly description of this table.
etag: [Output-only] A hash of this resource.
expirationTime: [Optional] The time when this table expires, in
milliseconds since the epoch. If not present, the table will persist
indefinitely. Expired tables will be deleted and their storage
reclaimed.
externalDataConfiguration: [Optional] Describes the data format, location,
and other properties of a table stored outside of BigQuery. By defining
these properties, the data source can then be queried as if it were a
standard BigQuery table.
friendlyName: [Optional] A descriptive name for this table.
id: [Output-only] An opaque ID uniquely identifying the table.
kind: [Output-only] The type of the resource.
lastModifiedTime: [Output-only] The time when this table was last
modified, in milliseconds since the epoch.
location: [Output-only] The geographic location where the table resides.
This value is inherited from the dataset.
numBytes: [Output-only] The size of this table in bytes, excluding any
data in the streaming buffer.
numRows: [Output-only] The number of rows of data in this table, excluding
any data in the streaming buffer.
partitionConfigurations: [Experimental] List of partition configurations
for this table. Currently only one configuration can be specified and it
can only be an interval partition with type daily.
schema: [Optional] Describes the schema of this table.
selfLink: [Output-only] A URL that can be used to access this resource
again.
streamingBuffer: [Output-only] Contains information regarding this table's
streaming buffer, if one is present. This field will be absent if the
table is not being streamed to or if there is no data in the streaming
buffer.
tableReference: [Required] Reference describing the ID of this table.
type: [Output-only] Describes the table type. The following values are
supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
by a SQL query. EXTERNAL: A table that references data stored in an
external storage system, such as Google Cloud Storage. The default value
is TABLE.
view: [Optional] The view definition.
"""
creationTime = _messages.IntegerField(1)
description = _messages.StringField(2)
etag = _messages.StringField(3)
expirationTime = _messages.IntegerField(4)
externalDataConfiguration = _messages.MessageField('ExternalDataConfiguration', 5)
friendlyName = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default=u'bigquery#table')
lastModifiedTime = _messages.IntegerField(9, variant=_messages.Variant.UINT64)
location = _messages.StringField(10)
numBytes = _messages.IntegerField(11)
numRows = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
partitionConfigurations = _messages.MessageField('TablePartitionConfiguration', 13, repeated=True)
schema = _messages.MessageField('TableSchema', 14)
selfLink = _messages.StringField(15)
streamingBuffer = _messages.MessageField('Streamingbuffer', 16)
tableReference = _messages.MessageField('TableReference', 17)
type = _messages.StringField(18)
view = _messages.MessageField('ViewDefinition', 19)
class TableCell(_messages.Message):
"""A TableCell object.
Fields:
v: An extra_types.JsonValue attribute.
"""
v = _messages.MessageField('extra_types.JsonValue', 1)
class TableDataInsertAllRequest(_messages.Message):
"""A TableDataInsertAllRequest object.
Messages:
RowsValueListEntry: A RowsValueListEntry object.
Fields:
ignoreUnknownValues: [Optional] Accept rows that contain values that do
not match the schema. The unknown values are ignored. Default is false,
which treats unknown values as errors.
kind: The resource type of the response.
rows: The rows to insert.
skipInvalidRows: [Optional] Insert all valid rows of a request, even if
invalid rows exist. The default value is false, which causes the entire
request to fail if any invalid rows exist.
templateSuffix: [Experimental] If specified, treats the destination table
as a base template, and inserts the rows into an instance table named
"{destination}{templateSuffix}". BigQuery will manage creation of the
instance table, using the schema of the base template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-
tables for considerations when working with template tables.
"""
class RowsValueListEntry(_messages.Message):
"""A RowsValueListEntry object.
Fields:
insertId: [Optional] A unique ID for each row. BigQuery uses this
property to detect duplicate insertion requests on a best-effort
basis.
json: [Required] A JSON object that contains a row of data. The object's
properties and values must match the destination table's schema.
"""
insertId = _messages.StringField(1)
json = _messages.MessageField('JsonObject', 2)
ignoreUnknownValues = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllRequest')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
skipInvalidRows = _messages.BooleanField(4)
templateSuffix = _messages.StringField(5)
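# Illustrative sketch (assumption: apitools' extra_types.JsonValue exposes a
# string_value field): one streaming-insert row is a JsonObject whose
# key/value pairs are AdditionalProperty entries, wrapped in a
# RowsValueListEntry together with an optional de-duplication insertId.
def _example_insert_all_request():
    row = JsonObject(additionalProperties=[
        JsonObject.AdditionalProperty(key='name',
                                      value=JsonValue(string_value='alice')),
    ])
    return TableDataInsertAllRequest(
        rows=[TableDataInsertAllRequest.RowsValueListEntry(insertId='row-0',
                                                           json=row)],
        skipInvalidRows=False,   # fail the whole request on any invalid row
    )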
class TableDataInsertAllResponse(_messages.Message):
"""A TableDataInsertAllResponse object.
Messages:
InsertErrorsValueListEntry: An InsertErrorsValueListEntry object.
Fields:
insertErrors: An array of errors for rows that were not inserted.
kind: The resource type of the response.
"""
class InsertErrorsValueListEntry(_messages.Message):
"""A InsertErrorsValueListEntry object.
Fields:
errors: Error information for the row indicated by the index property.
index: The index of the row that the error applies to.
"""
errors = _messages.MessageField('ErrorProto', 1, repeated=True)
index = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
insertErrors = _messages.MessageField('InsertErrorsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllResponse')
class TableDataList(_messages.Message):
"""A TableDataList object.
Fields:
etag: A hash of this page of results.
kind: The resource type of the response.
pageToken: A token used for paging results. Providing this token instead
of the startIndex parameter can help you retrieve stable results when an
underlying table is changing.
rows: Rows of results.
totalRows: The total number of rows in the complete table.
"""
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataList')
pageToken = _messages.StringField(3)
rows = _messages.MessageField('TableRow', 4, repeated=True)
totalRows = _messages.IntegerField(5)
class TableFieldSchema(_messages.Message):
"""A TableFieldSchema object.
Fields:
description: [Optional] The field description. The maximum length is 16K
characters.
fields: [Optional] Describes the nested schema fields if the type property
is set to RECORD.
mode: [Optional] The field mode. Possible values include NULLABLE,
REQUIRED and REPEATED. The default value is NULLABLE.
name: [Required] The field name. The name must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_), and must start with a letter or
underscore. The maximum length is 128 characters.
type: [Required] The field data type. Possible values include STRING,
BYTES, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD
indicates that the field contains a nested schema).
"""
description = _messages.StringField(1)
fields = _messages.MessageField('TableFieldSchema', 2, repeated=True)
mode = _messages.StringField(3)
name = _messages.StringField(4)
type = _messages.StringField(5)
class TableList(_messages.Message):
"""A TableList object.
Messages:
TablesValueListEntry: A TablesValueListEntry object.
Fields:
etag: A hash of this page of results.
kind: The type of list.
nextPageToken: A token to request the next page of results.
tables: Tables in the requested dataset.
totalItems: The total number of tables in the dataset.
"""
class TablesValueListEntry(_messages.Message):
"""A TablesValueListEntry object.
Fields:
friendlyName: The user-friendly name for this table.
id: An opaque ID of the table
kind: The resource type.
tableReference: A reference uniquely identifying the table.
type: The type of table. Possible values are: TABLE, VIEW.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#table')
tableReference = _messages.MessageField('TableReference', 4)
type = _messages.StringField(5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableList')
nextPageToken = _messages.StringField(3)
tables = _messages.MessageField('TablesValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class TablePartitionConfiguration(_messages.Message):
"""[Required] A partition configuration. Only one type of partition should
be configured.
Fields:
interval: [Pick one] Configures an interval partition.
"""
interval = _messages.MessageField('IntervalPartitionConfiguration', 1)
class TableReference(_messages.Message):
"""A TableReference object.
Fields:
datasetId: [Required] The ID of the dataset containing this table.
projectId: [Required] The ID of the project containing this table.
tableId: [Required] The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is
1,024 characters.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
tableId = _messages.StringField(3)
class TableRow(_messages.Message):
"""A TableRow object.
Fields:
f: Represents a single row in the result set, consisting of one or more
fields.
"""
f = _messages.MessageField('TableCell', 1, repeated=True)
class TableSchema(_messages.Message):
"""A TableSchema object.
Fields:
fields: Describes the fields in a table.
"""
fields = _messages.MessageField('TableFieldSchema', 1, repeated=True)
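# Illustrative sketch (not part of the generated module): a schema with one
# REQUIRED STRING column and one REPEATED RECORD column whose sub-fields are
# declared through the recursive `fields` attribute of TableFieldSchema.
def _example_nested_schema():
    return TableSchema(fields=[
        TableFieldSchema(name='user_id', type='STRING', mode='REQUIRED'),
        TableFieldSchema(
            name='events',
            type='RECORD',    # RECORD columns carry a nested schema
            mode='REPEATED',
            fields=[
                TableFieldSchema(name='ts', type='TIMESTAMP'),
                TableFieldSchema(name='action', type='STRING'),
            ]),
    ])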
class UserDefinedFunctionResource(_messages.Message):
"""A UserDefinedFunctionResource object.
Fields:
inlineCode: [Pick one] An inline resource that contains code for a user-
defined function (UDF). Providing an inline code resource is equivalent
to providing a URI for a file containing the same code.
resourceUri: [Pick one] A code resource to load from a Google Cloud
Storage URI (gs://bucket/path).
"""
inlineCode = _messages.StringField(1)
resourceUri = _messages.StringField(2)
class ViewDefinition(_messages.Message):
"""A ViewDefinition object.
Fields:
query: [Required] A query that BigQuery executes when the view is
referenced.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
"""
query = _messages.StringField(1)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 2, repeated=True)
| apache-2.0 |
wdavilaneto/ultron | mlcore/pipeline/tf_idf.py | 1 | 1813 | import csv
import pyorient
from sklearn.feature_extraction.text import TfidfVectorizer
from mlcore import TextService
from wordlist import WordLists
wordlist = WordLists()
text_service = TextService()
def get_corpus():
orientdb = pyorient.OrientDB("localhost", 2424)
orientdb.connect("root", "root")
orientdb.db_open('datascience', "root", "root")
# result = orientdb.command("SELECT * FROM Documento")
result = orientdb.command("SELECT * FROM StemDocument")
corpus = []
for each in result:
corpus.append(text_service.clean_text(each.stemmed_text))
return corpus
def tfidfy(texts):
tf = TfidfVectorizer(tokenizer=text_service.tokenize, ngram_range=(1, 2), encoding='UNICODE')
tfidf_matrix = tf.fit_transform(texts)
feature_names = tf.get_feature_names()
print(feature_names[30:40])
# print(tfidf_matrix)
dense = tfidf_matrix.todense()
first_doc_scores = dense[0].tolist()[0]  # TF-IDF scores of the first document
phrase_scores = [pair for pair in zip(range(0, len(first_doc_scores)), first_doc_scores) if pair[1] > 0]
sorted_phrase_scores = sorted(phrase_scores, key=lambda t: t[1] * -1)
for phrase, score in [(feature_names[word_id], score) for (word_id, score) in sorted_phrase_scores][:100]:
print('{0: <20} {1}'.format(phrase, score))
with open("tfidf_parecer.csv", "w+") as file:
writer = csv.writer(file, delimiter=",")
writer.writerow(["EpisodeId", "Phrase", "Score"])
doc_id = 0
for doc in dense:
word_id = 0
for score in doc.tolist()[0]:
if score > 0:
word = feature_names[word_id]
writer.writerow([doc_id + 1, word, score])
word_id += 1
doc_id += 1
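# Minimal, self-contained sketch of the same idea on a toy corpus, with no
# OrientDB dependency (the documents below are made up). The guard around
# get_feature_names_out() is only there because the accessor was renamed in
# newer scikit-learn releases.
def example_tfidf_toy_corpus():
    docs = [
        "processo civil recurso",
        "recurso especial penal",
        "parecer processo administrativo",
    ]
    tf = TfidfVectorizer(ngram_range=(1, 2))
    matrix = tf.fit_transform(docs)               # shape: (n_docs, n_terms)
    if hasattr(tf, "get_feature_names_out"):      # scikit-learn >= 1.0
        terms = tf.get_feature_names_out()
    else:                                         # older scikit-learn
        terms = tf.get_feature_names()
    top = sorted(zip(terms, matrix[0].toarray()[0]), key=lambda t: -t[1])[:5]
    for term, score in top:
        print('{0: <20} {1:.3f}'.format(term, score))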
categories = []
if __name__ == "__main__":
tfidfy(get_corpus())
| gpl-3.0 |
DistrictDataLabs/yellowbrick | tests/test_base.py | 1 | 10562 | # tests.test_base.py
# Assertions for the base classes and abstract hierarchy.
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Author: Neal Humphrey
# Created: Sat Oct 08 18:34:30 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_base.py [83131ef] [email protected] $
"""
Assertions for the base classes and abstract hierarchy.
"""
##########################################################################
## Imports
##########################################################################
import pytest
import matplotlib.pyplot as plt
from yellowbrick.base import *
from yellowbrick.base import VisualizerGrid
from yellowbrick.datasets import load_occupancy
from yellowbrick.exceptions import YellowbrickWarning
from yellowbrick.exceptions import YellowbrickValueError
from unittest.mock import patch
from unittest.mock import MagicMock
from tests.rand import RandomVisualizer
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.svm import LinearSVC
from sklearn.datasets import make_classification
##########################################################################
## Base Cases
##########################################################################
class TestBaseClasses(VisualTestCase):
"""
Tests for the high-level API of Yellowbrick and base classes
"""
def test_visualizer_ax_property(self):
"""
Test the ax property on the Visualizer
"""
viz = Visualizer()
assert viz._ax is None
assert viz.ax is not None
viz.ax = "foo"
assert viz._ax == "foo"
assert viz.ax == "foo"
def test_visualizer_fig_property(self):
"""
Test the fig property on the Visualizer
"""
viz = Visualizer()
assert viz._fig is None
assert viz.fig is not None
viz.fig = "foo"
assert viz._fig == "foo"
assert viz.fig == "foo"
def test_size_property(self):
"""
Test the size property on the base Visualizer
"""
fig = plt.figure(figsize=(1, 2))
viz = Visualizer()
assert viz._size is None
assert viz.size is not None
fsize = fig.get_size_inches() * fig.get_dpi()
assert all(viz.size) == all(fsize)
viz.size = (1080, 720)
assert viz._size == (1080, 720)
assert viz.size == (1080, 720)
fsize = fig.get_size_inches() * fig.get_dpi()
assert all(viz.size) == all(fsize)
def test_visualizer_fit_returns_self(self):
"""
Assert that all visualizers return self
"""
viz = Visualizer()
assert viz.fit([]) is viz
def test_draw_interface(self):
"""
Assert that draw cannot be called at the base level
"""
with pytest.raises(NotImplementedError):
viz = Visualizer()
viz.draw()
def test_finalize_interface(self):
"""
Assert finalize returns the finalized axes
"""
viz = Visualizer()
assert viz.finalize() is viz.ax
@patch("yellowbrick.base.plt")
def test_show_interface(self, mock_plt):
"""
Test show calls plt.show and other figure finalization correctly
"""
class CustomVisualizer(Visualizer):
pass
_, ax = plt.subplots()
viz = CustomVisualizer(ax=ax)
viz.finalize = MagicMock()
assert viz.show() is ax
viz.finalize.assert_called_once_with()
mock_plt.show.assert_called_once_with()
mock_plt.savefig.assert_not_called()
@patch("yellowbrick.base.plt")
def test_show_savefig_interface(self, mock_plt):
"""
Test show calls plt.savefig and other figure finalization correctly
"""
class CustomVisualizer(Visualizer):
pass
_, ax = plt.subplots()
viz = CustomVisualizer(ax=ax)
viz.finalize = MagicMock()
assert viz.show(outpath="test.png") is ax
viz.finalize.assert_called_once_with()
mock_plt.show.assert_not_called()
mock_plt.savefig.assert_called_once_with("test.png")
@patch("yellowbrick.base.plt")
def test_show_warns(self, mock_plt):
"""
Test show issues a warning when no axes has been modified
"""
class CustomVisualizer(Visualizer):
pass
with pytest.warns(YellowbrickWarning):
viz = CustomVisualizer()
assert viz.show() is not None
def test_poof_deprecated(self):
"""
Test that poof issues a deprecation warning
"""
class CustomVisualizer(Visualizer):
pass
viz = CustomVisualizer()
viz.show = MagicMock()
with pytest.warns(DeprecationWarning, match="please use show"):
viz.poof()
viz.show.assert_called_once()
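# Minimal sketch of the subclass contract exercised above: a concrete
# Visualizer supplies draw() (the base class raises NotImplementedError),
# draws on the lazily created self.ax, and lets show() call finalize().
# Illustrative only -- not a fixture used by the test suite.
class ExampleScatterVisualizer(Visualizer):
    """Toy visualizer that scatters the first two columns of X."""

    def fit(self, X, y=None):
        self.draw(X)
        return self                    # fit must return self (tested above)

    def draw(self, X):
        self.ax.scatter(X[:, 0], X[:, 1])
        return self.ax

    def finalize(self):
        self.ax.set_title("Example scatter")
        return self.ax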
##########################################################################
## ModelVisualizer Cases
##########################################################################
class MockModelVisualizer(ModelVisualizer):
def __init__(
self,
estimator,
ax=None,
fig=None,
is_fitted="auto",
param1='foo',
param2='bar',
**kwargs
):
super().__init__(estimator, ax=ax, fig=fig, is_fitted=is_fitted, **kwargs)
self.param1 = param1
self.param2 = param2
class TestModelVisualizer(VisualTestCase):
"""
Tests for the ModelVisualizer and wrapped estimator.
"""
def test_get_params(self):
"""
Test get_params from visualizer and wrapped estimator
"""
model = LinearSVC()
oz = MockModelVisualizer(model)
params = oz.get_params()
for key, val in model.get_params().items():
assert key in params
assert params[key] == val
for key in ('ax', 'fig', 'is_fitted', 'param1', 'param2'):
assert key in params
def test_set_params(self):
"""
Test set_params from visualizer to wrapped estimator
"""
model = LinearSVC()
oz = MockModelVisualizer(model)
orig = oz.get_params()
changes = {
"param1": 'water',
"is_fitted": False,
"estimator__C": 8.8,
"intercept_scaling": 4.2,
"dual": False,
}
oz.set_params(**changes)
params = oz.get_params()
for key, val in params.items():
if key in changes:
assert val == changes[key]
elif key == "C":
assert val == changes["estimator__C"]
else:
assert val == orig[key]
##########################################################################
## ScoreVisualizer Cases
##########################################################################
class MockVisualizer(ScoreVisualizer):
"""
Mock for a downstream score visualizer
"""
def fit(self, X, y):
super(MockVisualizer, self).fit(X, y)
class TestScoreVisualizer(VisualTestCase):
"""
Tests for the ScoreVisualizer
"""
def test_with_fitted(self):
"""
Test that visualizer properly handles an already-fitted model
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
model = LinearSVC().fit(X, y)
classes = ["unoccupied", "occupied"]
with patch.object(model, "fit") as mockfit:
oz = MockVisualizer(model, classes=classes)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = MockVisualizer(model, classes=classes, is_fitted=True)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = MockVisualizer(model, classes=classes, is_fitted=False)
oz.fit(X, y)
mockfit.assert_called_once_with(X, y)
##########################################################################
## Visual Grid Cases
##########################################################################
@pytest.mark.filterwarnings("ignore:Matplotlib is currently using agg")
class TestVisualizerGrid(VisualTestCase):
"""
Tests for the VisualizerGrid layout class
"""
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_draw_visualizer_grid(self):
"""
Draw a grid of 4 visualizers with default options
"""
visualizers = [RandomVisualizer(random_state=(1 + x) ** 2) for x in range(4)]
X, y = make_classification(random_state=78)
grid = VisualizerGrid(visualizers)
grid.fit(X, y)
# show is required here (do not replace with finalize)!
assert grid.show() is not None
self.assert_images_similar(grid, tol=1.0)
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_draw_with_rows(self):
"""
Draw 2 visualizers, each in its own row
"""
visualizers = [
RandomVisualizer(random_state=63),
RandomVisualizer(random_state=36),
]
X, y = make_classification(random_state=87)
grid = VisualizerGrid(visualizers, nrows=2)
grid.fit(X, y)
# show is required here (do not replace with finalize)!
assert grid.show() is not None
self.assert_images_similar(grid, tol=1.0)
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_draw_with_cols(self):
"""
Draw 2 visualizers, each in its own column
"""
visualizers = [
RandomVisualizer(random_state=633),
RandomVisualizer(random_state=336),
]
X, y = make_classification(random_state=187)
grid = VisualizerGrid(visualizers, ncols=2)
grid.fit(X, y)
# show is required here (do not replace with finalize)!
assert grid.show() is not None
self.assert_images_similar(grid, tol=1.0)
def test_cant_define_both_rows_cols(self):
"""
Assert that both nrows and ncols cannot be specified
"""
with pytest.raises(YellowbrickValueError):
VisualizerGrid([], ncols=2, nrows=2)
| apache-2.0 |
ningchi/scikit-learn | sklearn/naive_bayes.py | 5 | 28162 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
X : array-like, shape (n_samples, n_features)
New data points to fold into the running mean and variance.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
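    # Worked check of the update above: merging batch A = [1, 2, 3]
    # (n=3, mean=2, var=2/3) with batch B = [5, 7] (n=2, mean=6, var=1) gives
    #   total_mu  = (2*6 + 3*2) / 5 = 3.6
    #   total_ssd = 3*(2/3) + 2*1 + (3/(2*5)) * (2*2 - 2*6)**2 = 23.2
    #   total_var = 23.2 / 5 = 4.64
    # which matches np.mean([1, 2, 3, 5, 7]) and np.var([1, 2, 3, 5, 7]).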
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
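# Sketch of what the method above computes, checked against scipy.stats
# (illustrative only: it touches the private _joint_log_likelihood and the
# fitted attributes, so it is not part of the public API). For each class c
# the value is log P(c) + sum_i log N(x_i | theta_c[i], sigma_c[i]).
def _example_check_gaussian_jll():
    from scipy.stats import norm
    X = np.array([[-1., -1.], [-2., -3.], [1., 2.], [3., 1.]])
    y = np.array([0, 0, 1, 1])
    clf = GaussianNB().fit(X, y)
    x = X[0]
    manual = [np.log(clf.class_prior_[c]) +
              norm.logpdf(x, loc=clf.theta_[c],
                          scale=np.sqrt(clf.sigma_[c])).sum()
              for c in range(len(clf.classes_))]
    assert np.allclose(clf._joint_log_likelihood(X[:1])[0], manual)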
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
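    # Typical out-of-core use of the method above (sketch; the chunk names are
    # placeholders): the first call must receive the full label set via
    # `classes`, subsequent calls only see one batch at a time.
    #
    #   clf = MultinomialNB()
    #   clf.partial_fit(X_chunk0, y_chunk0, classes=np.unique(y_all))
    #   for X_chunk, y_chunk in remaining_chunks:
    #       clf.partial_fit(X_chunk, y_chunk)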
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
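# Sketch of the word-count use case described in the class docstring
# (illustrative; the tiny corpus and labels below are made up, and
# CountVectorizer comes from sklearn.feature_extraction.text):
def _example_multinomial_text():
    from sklearn.feature_extraction.text import CountVectorizer
    docs = ["free offer win money", "meeting agenda minutes",
            "win a free prize", "project meeting schedule"]
    labels = np.array([1, 0, 1, 0])                 # 1 = spam, 0 = ham
    vec = CountVectorizer()
    counts = vec.fit_transform(docs)                # sparse term-count matrix
    clf = MultinomialNB(alpha=1.0).fit(counts, labels)
    return clf.predict(vec.transform(["free money offer"]))   # array([1])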
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
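# Sketch of the binarize threshold in action (illustrative only): continuous
# features are mapped to {0, 1} before both fitting and prediction, so the
# two models below agree exactly.
def _example_bernoulli_binarize():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 10)                             # values in [0, 1)
    y = np.array([0, 0, 0, 1, 1, 1])
    Xb = (X > 0.5).astype(np.float64)               # manual thresholding
    clf = BernoulliNB(alpha=1.0, binarize=0.5).fit(X, y)
    ref = BernoulliNB(alpha=1.0, binarize=None).fit(Xb, y)
    assert np.allclose(clf.predict_log_proba(X), ref.predict_log_proba(Xb))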
| bsd-3-clause |
thientu/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
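# Usage sketch (illustrative, not part of the original module;
# `some_deprecated_function` is a placeholder callable expected to warn):
#
#     result = assert_warns(DeprecationWarning, some_deprecated_function, arg)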
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check the message of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
Name of the estimator
func : callable
Callable object to raise error
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
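# Usage sketch (illustrative, not part of the original module; the dataset
# name and arrays are placeholders):
#
#     install_mldata_mock({'iris': {'data': np.zeros((150, 4)),
#                                   'label': np.zeros(150)}})
#     ...  # code under test calling fetch_mldata('iris')
#     uninstall_mldata_mock()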
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
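# Usage sketch (illustrative, not part of the original module):
#
#     for name, Estimator in all_estimators(type_filter='classifier'):
#         print(name)   # e.g. inspect or smoke-test every classifier class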
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
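# Usage sketch (illustrative, not part of the original module; `estimator`
# and `y` are placeholders):
#
#     with TempMemmap(np.random.rand(100, 10)) as X_readonly:
#         estimator.fit(X_readonly, y)   # data is served as a read-only memmap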
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC | multimodal_fusion/data_generator.py | 1 | 9623 | import random
import time
import os
import re
import numpy as np
import pandas as pd
from sklearn import preprocessing
from keras.preprocessing import sequence
import keras.callbacks
#====================================================== DATA GENERATOR =================================================================================
# Data generator that provides the training and validation data. Works with mini-batches of audio feature files.
# The data generator is called using the next_train() and next_val() methods.
# Class constructor to initialize the datagenerator object.
class DataGenerator(keras.callbacks.Callback):
def __init__(self,
minibatch_size,
numfeats_skeletal,
numfeats_speech,
maxlen,
nb_classes,
dataset,
val_split=0.2,
absolute_max_sequence_len=35):
# Currently only 2 files per batch are used.
self.minibatch_size = minibatch_size
# Maximum length of data sequence.
self.maxlen = maxlen
# 39 mel frequency feats.
self.numfeats_speech = numfeats_speech
# 22 skeletal feats.
self.numfeats_skeletal = numfeats_skeletal
# Size of the validation set.
self.val_split = val_split
# Max number of labels per label sequence.
self.absolute_max_sequence_len = absolute_max_sequence_len
# Indexing variables
self.train_index = 0
self.val_index = 0
# Actually 1-20 classes and 21 is the blank label.
self.nb_classes = nb_classes
# Blank label to use.
self.blank_label = np.array([self.nb_classes - 1])
self.dataset = dataset
if self.dataset == 'train':
self.in_audio_dir = '../data/train_audio'
self.in_file_skeletal = '../data/Training_set_skeletal.csv'
elif self.dataset == 'val':
self.in_audio_dir = '../data/val_audio'
self.in_file_skeletal = '../data/Validation_set_skeletal.csv'
elif self.dataset == 'final':
self.in_audio_dir = '../data/final_audio'
self.in_file_skeletal = '../data/final_set_skeletal.csv'
self.load_dataset()
# Loads and preprocesses the dataset and splits it into training and validation set.
# This runs at initialization.
def load_dataset(self):
print 'Loading data...'
if self.dataset == 'train':
train_lab_file = '../data/training_oov.csv'
elif self.dataset == 'val':
train_lab_file = '../data/validation.csv'
elif self.dataset == 'final':
train_lab_file = '../data/validation.csv'
labs = pd.read_csv(train_lab_file)
self.labs = labs
self.df_s = pd.read_csv(self.in_file_skeletal)
self.df_s = self.normalize_data()
file_list = os.listdir(self.in_audio_dir)
file_list = sorted([int(re.findall('audio_(\d+).csv',file_name)[0]) for file_name in file_list])
if self.dataset == 'train':
random.seed(10)
random.shuffle(file_list)
split_point = int(len(file_list) * (1 - self.val_split))
self.train_list, self.val_list = file_list[:split_point], file_list[split_point:]
self.train_size = len(self.train_list)
self.val_size = len(self.val_list)
#Make sure that train and validation lists have an even length to avoid mini-batches of size 1
train_mod_by_batch_size = self.train_size % self.minibatch_size
if train_mod_by_batch_size != 0:
del self.train_list[-train_mod_by_batch_size:]
self.train_size -= train_mod_by_batch_size
val_mod_by_batch_size = self.val_size % self.minibatch_size
if val_mod_by_batch_size != 0:
del self.val_list[-val_mod_by_batch_size:]
self.val_size -= val_mod_by_batch_size
else:
self.val_list = file_list
self.val_size = len(self.val_list)
def get_size(self,train):
"""
"""
if train:
return self.train_size
else:
return self.val_size
def get_file_list(self,train):
if train:
return self.train_list
else:
return self.val_list
# Normalize the data to have zero mean and unit variance.
# Called at initialization.
def normalize_data(self):
data = self.df_s[['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang','rh_el_ang']].as_matrix().astype(float)
norm_data = preprocessing.scale(data)
norm_df = pd.DataFrame(norm_data,
columns=['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang',
'rh_el_ang'])
norm_df['file_number'] = self.df_s['file_number']
return norm_df
# Called each time a batch (list of file ids) is requested from train/val/test.
# Basic function that returns a batch of training or validation data.
# Returns a dictionary with the required fields for CTC training and a dummy output variable.
def get_batch(self, train):
# number of files in batch is 2
# Select train or validation mode.
if train:
file_list = self.train_list
index = self.train_index
else:
file_list = self.val_list
index = self.val_index
# Get the batch.
try:
batch = file_list[index:(index + self.minibatch_size)]
except:
batch = file_list[index:]
size = len(batch)
# Initialize the variables to be returned.
X_data_a = np.ones([size, self.maxlen, self.numfeats_speech])
labels_a = np.ones([size, self.absolute_max_sequence_len])
input_length_a = np.zeros([size, 1])
label_length_a = np.zeros([size, 1])
X_data_s = np.ones([size, self.maxlen, self.numfeats_skeletal])
labels_s = np.ones([size, self.absolute_max_sequence_len])
input_length_s = np.zeros([size, 1])
label_length_s = np.zeros([size, 1])
# Read batch.
for i in range(len(batch)):
file = batch[i]
audio_file_name = 'audio_' + str(file) + '.csv'
audio_file_path = os.path.join(self.in_audio_dir,audio_file_name)
vf_a = pd.read_csv(audio_file_path).drop(['file_number'],axis=1)
if set(['39', '40']).issubset(vf_a.columns):
vf_a = vf_a.drop(['39','40'],axis=1)
# Downsample the audio by a factor of 5.
vf_a = vf_a.iloc[::5, :].reset_index(drop=True)
vf_s = self.df_s[self.df_s['file_number'] == file]
# Audio
gest_seq_a = vf_a.as_matrix().astype(float)
gest_seq_a = sequence.pad_sequences([gest_seq_a],
maxlen=self.maxlen,
padding='post',
truncating='post',
dtype='float32')
# Skeletal
gest_seq_s = vf_s[['lh_v','rh_v','le_v','re_v','lh_dist_rp',
'rh_dist_rp','lh_hip_d','rh_hip_d','le_hip_d','re_hip_d',
'lh_shc_d','rh_shc_d','le_shc_d','re_shc_d','lh_hip_ang',
'rh_hip_ang','lh_shc_ang','rh_shc_ang','lh_el_ang','rh_el_ang']].as_matrix().astype(float)
gest_seq_s = sequence.pad_sequences([gest_seq_s],
maxlen=self.maxlen,
padding='post',
truncating='post',
dtype='float32')
# The final set does not need labels since it's meant for testing.
if self.dataset != 'final':
lab_seq = self.labs[self.labs['Id'] == file]
lab_seq = np.array([int(lab) for lab in lab_seq['Sequence'].values[0].split()]).astype('float32')
else:
lab_seq = np.array([0])
# If a sequence is not found insert a blank example and pad.
if lab_seq.shape[0] == 0:
lab_seq = sequence.pad_sequences([self.blank_label],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels_s[i, :] = lab_seq
labels_a[i, :] = lab_seq
label_length_s[i] = 1
label_length_a[i] = 1
# Else save the returned variables.
else:
X_data_a[i, :, :] = gest_seq_a
# Ignore empty examples
try:
X_data_s[i, :, :] = gest_seq_s
except:
print 'blank'
label_length_a[i] = lab_seq.shape[0]
label_length_s[i] = lab_seq.shape[0]
lab_seq = sequence.pad_sequences([lab_seq],
maxlen=(self.absolute_max_sequence_len),
padding='post',
value=-1)
labels_a[i, :] = lab_seq
labels_s[i, :] = lab_seq
input_length_a[i] = (X_data_a[i].shape[0] - 2)
input_length_s[i] = (X_data_s[i].shape[0] - 2)
# Returned values: a dictionary with the following entries
# the_input_audio: audio data sequence
# the_input_skeletal: skeletal data sequence
# the_labels: label sequence
# input_length: length of data sequence
# label_length: length of label sequence
# outputs: dummy vector of zeros required for keras CTC training
inputs = {'the_input_audio': X_data_a,
'the_input_skeletal': X_data_s,
'the_labels': labels_a,
'input_length': input_length_a,
'label_length': label_length_a,
}
outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
return (inputs, outputs)
# Get the next training batch and update index.
# Called by the generator during training.
def next_train(self):
while 1:
ret = self.get_batch(train=True)
self.train_index += self.minibatch_size
if self.train_index >= self.train_size:
self.train_index = 0
yield ret
# Get the next validation batch and update index.
# Called by the generator during validation.
def next_val(self):
while 1:
ret = self.get_batch(train=False)
self.val_index += self.minibatch_size
if self.val_index >= self.val_size:
self.val_index = 0
yield ret
# Save model and weights at the end of each epoch.
# Callback at the end of each epoch.
def on_epoch_end(self, epoch, logs={}):
self.train_index = 0
self.val_index = 0
random.shuffle(self.train_list)
random.shuffle(self.val_list)
model_json = self.model.to_json()
with open("multimodal_ctc_blstm_model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights("multimodal_ctc_blstm_weights.h5")
print "Saved model to disk"
| mit |
solashirai/edx-platform | lms/djangoapps/course_api/blocks/tests/test_serializers.py | 6 | 8035 | """
Tests for Course Blocks serializers
"""
from mock import MagicMock
from course_blocks.tests.helpers import EnableTransformerRegistryMixin
from openedx.core.lib.block_structure.transformers import BlockStructureTransformers
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import ToyCourseFactory
from lms.djangoapps.course_blocks.api import get_course_blocks, COURSE_BLOCK_ACCESS_TRANSFORMERS
from student.roles import CourseStaffRole
from ..transformers.blocks_api import BlocksAPITransformer
from ..serializers import BlockSerializer, BlockDictSerializer
from .helpers import deserialize_usage_key
class TestBlockSerializerBase(EnableTransformerRegistryMixin, SharedModuleStoreTestCase):
"""
Base class for testing BlockSerializer and BlockDictSerializer
"""
@classmethod
def setUpClass(cls):
super(TestBlockSerializerBase, cls).setUpClass()
cls.course = ToyCourseFactory.create()
# Hide the html block
key = cls.course.id.make_usage_key('html', 'secret:toylab')
cls.html_block = cls.store.get_item(key)
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestBlockSerializerBase, self).setUp()
self.user = UserFactory.create()
blocks_api_transformer = BlocksAPITransformer(
block_types_to_count=['video'],
requested_student_view_data=['video'],
)
self.block_structure = get_course_blocks(
self.user,
self.course.location,
BlockStructureTransformers(COURSE_BLOCK_ACCESS_TRANSFORMERS + [blocks_api_transformer]),
)
self.serializer_context = {
'request': MagicMock(),
'block_structure': self.block_structure,
'requested_fields': ['type'],
}
def assert_basic_block(self, block_key_string, serialized_block):
"""
Verifies the given serialized_block when basic fields are requested.
"""
block_key = deserialize_usage_key(block_key_string, self.course.id)
self.assertEquals(
self.block_structure.get_xblock_field(block_key, 'category'),
serialized_block['type'],
)
self.assertEquals(
set(serialized_block.iterkeys()),
{'id', 'type', 'lms_web_url', 'student_view_url'},
)
def add_additional_requested_fields(self, context=None):
"""
Adds additional fields to the requested_fields context for the serializer.
"""
if context is None:
context = self.serializer_context
context['requested_fields'].extend([
'children',
'display_name',
'graded',
'format',
'block_counts',
'student_view_data',
'student_view_multi_device',
'lti_url',
'visible_to_staff_only',
])
def assert_extended_block(self, serialized_block):
"""
Verifies the given serialized_block when additional fields are requested.
"""
self.assertLessEqual(
{
'id', 'type', 'lms_web_url', 'student_view_url',
'display_name', 'graded',
'block_counts', 'student_view_multi_device',
'lti_url',
'visible_to_staff_only',
},
set(serialized_block.iterkeys()),
)
# video blocks should have student_view_data
if serialized_block['type'] == 'video':
self.assertIn('student_view_data', serialized_block)
# html blocks should have student_view_multi_device set to True
if serialized_block['type'] == 'html':
self.assertIn('student_view_multi_device', serialized_block)
self.assertTrue(serialized_block['student_view_multi_device'])
def create_staff_context(self):
"""
Create staff user and course blocks accessible by that user
"""
# Create a staff user to be able to test visible_to_staff_only
staff_user = UserFactory.create()
CourseStaffRole(self.course.location.course_key).add_users(staff_user)
block_structure = get_course_blocks(
staff_user,
self.course.location,
BlockStructureTransformers(COURSE_BLOCK_ACCESS_TRANSFORMERS),
)
return {
'request': MagicMock(),
'block_structure': block_structure,
'requested_fields': ['type'],
}
def assert_staff_fields(self, serialized_block):
"""
Test fields accessed by a staff user
"""
if serialized_block['id'] == unicode(self.html_block.location):
self.assertTrue(serialized_block['visible_to_staff_only'])
else:
self.assertFalse(serialized_block['visible_to_staff_only'])
class TestBlockSerializer(TestBlockSerializerBase):
"""
Tests the BlockSerializer class, which returns a list of blocks.
"""
def create_serializer(self, context=None):
"""
creates a BlockSerializer
"""
if context is None:
context = self.serializer_context
return BlockSerializer(
context['block_structure'], many=True, context=context,
)
def test_basic(self):
serializer = self.create_serializer()
for serialized_block in serializer.data:
self.assert_basic_block(serialized_block['id'], serialized_block)
def test_additional_requested_fields(self):
self.add_additional_requested_fields()
serializer = self.create_serializer()
for serialized_block in serializer.data:
self.assert_extended_block(serialized_block)
def test_staff_fields(self):
"""
Test fields accessed by a staff user
"""
context = self.create_staff_context()
self.add_additional_requested_fields(context)
serializer = self.create_serializer(context)
for serialized_block in serializer.data:
self.assert_extended_block(serialized_block)
self.assert_staff_fields(serialized_block)
class TestBlockDictSerializer(TestBlockSerializerBase):
"""
Tests the BlockDictSerializer class, which returns a dict of blocks keyed by block_key.
"""
def create_serializer(self, context=None):
"""
creates a BlockDictSerializer
"""
if context is None:
context = self.serializer_context
return BlockDictSerializer(
context['block_structure'], many=False, context=context,
)
def test_basic(self):
serializer = self.create_serializer()
# verify root
self.assertEquals(serializer.data['root'], unicode(self.block_structure.root_block_usage_key))
# verify blocks
for block_key_string, serialized_block in serializer.data['blocks'].iteritems():
self.assertEquals(serialized_block['id'], block_key_string)
self.assert_basic_block(block_key_string, serialized_block)
def test_additional_requested_fields(self):
self.add_additional_requested_fields()
serializer = self.create_serializer()
for serialized_block in serializer.data['blocks'].itervalues():
self.assert_extended_block(serialized_block)
def test_staff_fields(self):
"""
Test fields accessed by a staff user
"""
context = self.create_staff_context()
self.add_additional_requested_fields(context)
serializer = self.create_serializer(context)
for serialized_block in serializer.data['blocks'].itervalues():
self.assert_extended_block(serialized_block)
self.assert_staff_fields(serialized_block)
| agpl-3.0 |
google/lasr | third_party/chamfer3D/dist_chamfer_3D.py | 1 | 2341 | from torch import nn
from torch.autograd import Function
import torch
import importlib
import os
chamfer_found = importlib.find_loader("chamfer_3D") is not None
if not chamfer_found:
## Cool trick from https://github.com/chrdiller
print("Jitting Chamfer 3D")
from torch.utils.cpp_extension import load
chamfer_3D = load(name="chamfer_3D",
sources=[
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]),
"/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer3D.cu"]),
])
print("Loaded JIT 3D CUDA chamfer distance")
else:
import chamfer_3D
print("Loaded compiled 3D CUDA chamfer distance")
# Chamfer's distance module @thibaultgroueix
# GPU tensors only
class chamfer_3DFunction(Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
device = xyz1.device
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
dist1 = dist1.to(device)
dist2 = dist2.to(device)
idx1 = idx1.to(device)
idx2 = idx2.to(device)
torch.cuda.set_device(device)
chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
return dist1, dist2, idx1, idx2
@staticmethod
def backward(ctx, graddist1, graddist2, gradidx1, gradidx2):
xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
device = graddist1.device
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
gradxyz1 = gradxyz1.to(device)
gradxyz2 = gradxyz2.to(device)
chamfer_3D.backward(
xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2
)
return gradxyz1, gradxyz2
class chamfer_3DDist(nn.Module):
def __init__(self):
super(chamfer_3DDist, self).__init__()
def forward(self, input1, input2):
input1 = input1.contiguous()
input2 = input2.contiguous()
return chamfer_3DFunction.apply(input1, input2)
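# Usage sketch (illustrative, not part of the original file; requires a CUDA
# device since the kernels are GPU-only):
#
#     chamfer = chamfer_3DDist()
#     p1 = torch.rand(4, 1024, 3).cuda()   # (batch, n_points, xyz)
#     p2 = torch.rand(4, 2048, 3).cuda()
#     dist1, dist2, idx1, idx2 = chamfer(p1, p2)
#     loss = dist1.mean() + dist2.mean()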
| apache-2.0 |
sid88in/incubator-airflow | airflow/lineage/__init__.py | 17 | 5067 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from functools import wraps
from airflow import configuration as conf
from airflow.lineage.datasets import DataSet
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.module_loading import import_string, prepare_classpath
from itertools import chain
PIPELINE_OUTLETS = "pipeline_outlets"
PIPELINE_INLETS = "pipeline_inlets"
log = LoggingMixin().log
def _get_backend():
backend = None
try:
_backend_str = conf.get("lineage", "backend")
prepare_classpath()
backend = import_string(_backend_str)
except ImportError as ie:
log.debug("Cannot import %s due to %s", _backend_str, ie)
except conf.AirflowConfigException:
log.debug("Could not find lineage backend key in config")
return backend
def apply_lineage(func):
"""
Saves the lineage to XCom and if configured to do so sends it
to the backend.
"""
backend = _get_backend()
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Lineage called with inlets: %s, outlets: %s",
self.inlets, self.outlets)
ret_val = func(self, context, *args, **kwargs)
outlets = [x.as_dict() for x in self.outlets]
inlets = [x.as_dict() for x in self.inlets]
if len(self.outlets) > 0:
self.xcom_push(context,
key=PIPELINE_OUTLETS,
value=outlets,
execution_date=context['ti'].execution_date)
if len(self.inlets) > 0:
self.xcom_push(context,
key=PIPELINE_INLETS,
value=inlets,
execution_date=context['ti'].execution_date)
if backend:
backend.send_lineage(operator=self, inlets=self.inlets,
outlets=self.outlets, context=context)
return ret_val
return wrapper
def prepare_lineage(func):
"""
Prepares the lineage inlets and outlets
inlets can be:
"auto" -> picks up any outlets from direct upstream tasks that have outlets
defined, as such that if A -> B -> C and B does not have outlets but A does,
these are provided as inlets.
"list of task_ids" -> picks up outlets from the upstream task_ids
"list of datasets" -> manually defined list of DataSet
"""
@wraps(func)
def wrapper(self, context, *args, **kwargs):
self.log.debug("Preparing lineage inlets and outlets")
task_ids = set(self._inlets['task_ids']).intersection(
self.get_flat_relative_ids(upstream=True)
)
if task_ids:
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if self._inlets['auto']:
# dont append twice
task_ids = set(self._inlets['task_ids']).symmetric_difference(
self.upstream_task_ids
)
inlets = self.xcom_pull(context,
task_ids=task_ids,
dag_id=self.dag_id,
key=PIPELINE_OUTLETS)
inlets = [item for sublist in inlets if sublist for item in sublist]
inlets = [DataSet.map_type(i['typeName'])(data=i['attributes'])
for i in inlets]
self.inlets.extend(inlets)
if len(self._inlets['datasets']) > 0:
self.inlets.extend(self._inlets['datasets'])
# outlets
if len(self._outlets['datasets']) > 0:
self.outlets.extend(self._outlets['datasets'])
self.log.debug("inlets: %s, outlets: %s", self.inlets, self.outlets)
for dataset in chain(self.inlets, self.outlets):
dataset.set_context(context)
return func(self, context, *args, **kwargs)
return wrapper
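# Usage sketch (illustrative, not part of the original module; assumes a DAG
# named `dag` and a lineage backend configured in airflow.cfg):
#
#     from airflow.lineage.datasets import File
#     from airflow.operators.bash_operator import BashOperator
#
#     f_in = File("/tmp/whole_directory/")
#     f_out = File("/tmp/{{ execution_date }}")
#     run_this = BashOperator(task_id="run_me", bash_command="echo 1", dag=dag,
#                             inlets={"datasets": [f_in]},
#                             outlets={"datasets": [f_out]})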
| apache-2.0 |
axbaretto/beam | sdks/python/apache_beam/io/gcp/dicomclient.py | 8 | 3893 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from google.auth import default
from google.auth.transport import requests
class DicomApiHttpClient:
"""DICOM api client that talk to api via http request"""
healthcare_base_url = "https://healthcare.googleapis.com/v1"
session = None
def get_session(self, credential):
if self.session:
return self.session
# if the credential is not provided, use the default credential.
if not credential:
credential, _ = default()
new_session = requests.AuthorizedSession(credential)
self.session = new_session
return new_session
def qido_search(
self,
project_id,
region,
dataset_id,
dicom_store_id,
search_type,
params=None,
credential=None):
"""function for searching a DICOM store"""
# sending request to the REST healthcare api.
api_endpoint = "{}/projects/{}/locations/{}".format(
self.healthcare_base_url, project_id, region)
# base of dicomweb path.
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/{}".format(
api_endpoint, dataset_id, dicom_store_id, search_type)
# Make an authenticated API request
session = self.get_session(credential)
headers = {"Content-Type": "application/dicom+json; charset=utf-8"}
page_size = 500
if params and 'limit' in params:
page_size = params['limit']
elif params:
params['limit'] = page_size
else:
params = {'limit': page_size}
offset = 0
output = []
# iterate to get all the results
while True:
params['offset'] = offset
response = session.get(dicomweb_path, headers=headers, params=params)
status = response.status_code
if status != 200:
if offset == 0:
return [], status
params['offset'] = offset - 1
params['limit'] = 1
response = session.get(dicomweb_path, headers=headers, params=params)
check_status = response.status_code
if check_status == 200:
# if the number of results equals to page size
return output, check_status
else:
# something wrong with the request or server
return [], status
results = response.json()
output += results
if len(results) < page_size:
# got all the results, return
break
offset += len(results)
return output, status
def dicomweb_store_instance(
self,
project_id,
region,
dataset_id,
dicom_store_id,
dcm_file,
credential=None):
"""function for storing an instance."""
api_endpoint = "{}/projects/{}/locations/{}".format(
self.healthcare_base_url, project_id, region)
dicomweb_path = "{}/datasets/{}/dicomStores/{}/dicomWeb/studies".format(
api_endpoint, dataset_id, dicom_store_id)
# Make an authenticated API request
session = self.get_session(credential)
content_type = "application/dicom"
headers = {"Content-Type": content_type}
response = session.post(dicomweb_path, data=dcm_file, headers=headers)
return None, response.status_code
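# Usage sketch (illustrative, not part of the original file; the project,
# region, dataset and store ids are placeholders):
#
#     client = DicomApiHttpClient()
#     results, status = client.qido_search(
#         "my-project", "us-central1", "my-dataset", "my-dicom-store",
#         "instances", params={"limit": 100})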
| apache-2.0 |
woobe/h2o | py/testdir_single_jvm/test_from_putfile.py | 2 | 1861 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
import h2o_browse as h2b
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=10)
else:
h2o_hosts.build_cloud_with_hosts(1,java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_B_putfile_files(self):
timeoutSecs = 500
# "covtype169x.data",
# "covtype.13x.shuffle.data",
# "3G_poker_shuffle"
# "covtype20x.data",
# "billion_rows.csv.gz",
csvFilenameList = [
("covtype.data", 'standard/covtype.data', 1),
]
# pop open a browser on the cloud
h2b.browseTheCloud()
for (csvFilename, csvPathname, trees) in csvFilenameList:
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, timeoutSecs=500, schema='put')
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
# We should be able to see the parse result?
inspect2 = h2o_cmd.runInspect(key=parseResult['destination_key'])
print "\n" + csvFilename
start = time.time()
# constrain depth to 25
if trees is not None:
RFview = h2o_cmd.runRF(trees=trees,depth=25,parseResult=parseResult,
timeoutSecs=timeoutSecs)
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
ningchi/scikit-learn | benchmarks/bench_plot_svd.py | 322 | 2899 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
fedhere/SESNCfAlib | gps.py | 1 | 6537 |
# Imports inferred from usage below; snstuff and templutils are project-local
# SESNCfAlib modules, diff/isfinite are used unqualified (pylab style), and
# hostebmv / OPT are expected to be defined by the calling script.
import os
import glob
import numpy as np
import pylab as pl
from numpy import diff, isfinite
import snstuff
import templutils

def readin(f):
thissn=snstuff.mysn(f, addlit=True)
################read supernova data and check metadata
lc, flux, dflux, snname = thissn.loadsn(f, fnir=True, verbose=True,
lit=True, addlit=True)
#thissn.printsn()
#raw_input()
thissn.readinfofileall(verbose=False, earliest=False, loose=True)
#thissn.printsn()
Dl = float(thissn.metadata['luminosity distance Mpc'])
su = templutils.setupvars()
thissn.setsn(thissn.metadata['Type'], thissn.Vmax)
myebmv=su.ebmvs[thissn.snnameshort]
print ("E(B-V)", myebmv)
myebmv+=hostebmv#su.ebmvcfa[thissn.snnameshort]
print ("E(B-V) total", myebmv)
Vmax = thissn.Vmax
thissn.setphot()
thissn.getphot(myebmv)
thissn.setphase()
thissn.printsn(photometry=False)
#thissn.printsn()
fig = pl.figure(figsize=(5,3))
thissn.plotsn(photometry=True, show=True, fig = fig)
return thissn
def skgp (x, y, yerr, phases, t0):
from sklearn.gaussian_process import GaussianProcess
XX = np.atleast_2d(np.log(x-min(x)+1)).T
#XX = np.atleast_2d(x).T
gphere = GaussianProcess(corr='squared_exponential',
theta0=t0,
thetaL=t0*0.1,
thetaU=t0*10,
nugget=(yerr / y) ** 2,
random_start=100)
gphere.fit(XX, y)
xx = np.atleast_2d(np.log(phases-min(x)+1)).T
#xx = np.atleast_2d(phases).T
y_pred, MSE = gphere.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
return (y_pred, sigma)
def georgegp (x, y, yerr, phases, kc, kc1):
import george
# Set up the Gaussian process.
kernel = kc1 * 10 * kernelfct(kc)#ExpSquaredKernel(1.0)
gp = george.GP(kernel)
#print ("wtf", gp.kernel)
# adding a small random offset to the phase so that i never have
# 2 measurements at the same time which would break the GP
# Pre-compute the factorization of the matrix.
XX = x
XX = np.log(XX-XX.min()+1)
# You need to compute the GP once before starting the optimization.
gp.compute(XX, yerr)
# Print the initial ln-likelihood.
#print("here", gp.lnlikelihood(y))
#print("here", gp.grad_lnlikelihood(y))
# Run the optimization routine.
if OPT:
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll, args=(gp))
print results.x
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
#print(gp.lnlikelihood(y))
# gp.compute(XX, yerr)
# Compute the log likelihood.
#print(gp.lnlikelihood(y))
#t = np.linspace(0, 10, 500)
##xx = np.log(phases-min(X)+1)
xx = phases
xx = np.log(xx-x.min()+1)
mu, cov = gp.predict(y, xx)
std = np.sqrt(np.diag(cov))
return (mu, std)
import scipy.optimize as op
# Define the objective function (negative log-likelihood in this case).
def nll(p, gp):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p, gp):
# Update the kernel parameters and compute the likelihood.
print ("wtf2", gp.kernel)
gp.kernel[:] = p
print (gp.kernel[:])
print -gp.grad_lnlikelihood(y, quiet=True)
return -gp.grad_lnlikelihood(y, quiet=True)
def getskgpreds(ts, x, y, yerr, phases, fig = None):
t0, t1 = ts
if t0 ==0 or t1==0:
return 1e9
#print (t0,t1)
gp1, gp2 = georgegp(x, y, yerr, x, t0, t1)
s1= sum(((gp1-y)/yerr)**2)/len(y)
#pl.figure(figsize=(1,3))
#pl.plot(x, gp1,'*')
gp1, gp2 = georgegp(x, y, yerr, phases, t0, t1)
s2= sum(np.abs((gp1[2:]+gp1[:-2]-2*gp1[1:-1])/\
(diff(phases)[1:]+diff(phases)[:-1])))
print ("%.3f"%t0, "%.3f"%t1, "%.1f"%s1, "%.3f"%s2, s1*s2)
if fig:
pl.errorbar(x,y,yerr=yerr,fmt='.')
pl.plot(phases, gp1,'-')
pl.fill_between(phases, gp1-gp2, gp1+gp2, color='k')
pl.title("%.3f %.3f %.3f"%(t0, t1, (s1*s2)), fontsize=15)
pl.ylim(pl.ylim()[1], pl.ylim()[0])
if isfinite(s1*s2) and not np.isnan(s1*s2):
return s1*s2
return 1e9
def kernelfct(kc):
from george.kernels import ExpSquaredKernel, WhiteKernel, ExpKernel, Matern32Kernel
return ExpSquaredKernel(kc)# Matern32Kernel(kc)
from scipy import stats
sn = '08D'
b='V'
def findgp(sn, b):
fall = glob.glob(os.getenv('SESNPATH')+'/finalphot/*'+sn+'*[cf]')
if len(fall)>0:
fall[-1] = [fall[-1]] + \
[ff for ff in glob.glob(os.environ['SESNPATH']+\
"/literaturedata/phot/*"+sn+".*[cf]")]
else: fall = [[ff for ff in glob.glob(os.environ['SESNPATH']+"/literaturedata/phot/*"+sn+".*[cf]")]]
f = fall[0]
if not isinstance (f, basestring):
f=f[0]
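# NOTE: `thissn` below is assumed to be a module-level object (e.g. the
# result of readin(f) above); it is not defined inside this function.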
x= thissn.photometry[b]['phase']
x+=0.01*np.random.randn(len(x))
y= thissn.photometry[b]['mag']
yerr= thissn.photometry[b]['dmag']
phases = np.arange(x.min(),x.max(),0.1)
if x.max()<=30:
if x.min()<=-15:
x15 = np.where(np.abs(x+15)==np.abs(x+15).min())[0]
print x15, y[x15[0]]+0.5
x = np.concatenate([x,[30]])
y = np.concatenate([y,[y[x15[0]]+0.5]])
yerr = np.concatenate([yerr,[0.5]])
print (x,y,yerr)
elif (x>=15).sum()>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(x[x>=15],y[x>=15])
x = np.concatenate([x,[30]])
y = np.concatenate([y,[slope*30.+intercept]])
yerr = np.concatenate([yerr,[yerr.max()*2]])
print (x,y,yerr)
else:
return -1
#fig = pl.figure(figsize=(10,3))
results = op.minimize(getskgpreds, (0.4,1.0), args = (x,y,yerr,phases), bounds=((0,None),(0,None)), tol=1e-8)
print (results.x)
#t1s = np.exp(np.arange(-2,2,0.5))
#for tt in np.exp(np.arange(-2,2,0.5)):
# fig = pl.figure(figsize=(10,3))
# for i,ttt in enumerate(t1s):
# ax = fig.add_subplot(len(t1s),1,i+1)
# getskgpreds(x,y,yerr,phases,tt,ttt)
fig = pl.figure(figsize=(10,3))
getskgpreds(results.x,x,y,yerr,phases, fig)
pl.ylabel(b+" magnitude")
| mit |
Fireblend/scikit-learn | examples/mixture/plot_gmm_sin.py | 247 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | sklearn/svm/tests/test_bounds.py | 277 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
overxfl0w/AutoKNW | Kernel/Config.py | 1 | 2000 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from scipy.spatial import distance
from FeatureExtractor import (get_sentence_representation_sum,
                              get_sentence_representation_mult,
                              get_sentence_representation_distance,
                              get_sentence_representation_weighted,
                              get_sentence_representation_covariance)
from sklearn import svm
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.naive_bayes import GaussianNB
from sklearn import tree
from sklearn.linear_model import SGDClassifier
from Classifier import get_class
from Classifier import get_k_classes
## Project info ##
AUTHOR = "JGonzalez"
VERSION = "a0.01"
################
## General ##
VERBOSE = False
#############
## Preprocess ##
LANGUAGES = ["spanish","english"]
LANGUAGE = "spanish"
STOPWORDS = []
for lang in LANGUAGES:
for word in stopwords.words(lang):
STOPWORDS.append(word)
STEMMER = SnowballStemmer(LANGUAGE)
LEMMATIZER = WordNetLemmatizer()
PATTERN_TOKENIZER = " "
PATTERN_SEP_SENTENCE = "\n"
NON_ALFANUMERIC_PATTERNS = ["\n",".",","]
NON_ALFANUMERIC_FUNC = 2
###############
## Word2vec model ##
MIN_COUNT_W2V = 2
WINDOW_W2V = 1
SIZE_W2V = 250
####################
## Feature extraction ##
SENTENCE_REPRESENTATION = get_sentence_representation_sum
DISTANCE_REPRESENTATION = distance.correlation
REGULARIZATION = False
########################
## Classification ##
DISTANCE_CLASSIFICATION = distance.correlation
K_PROJECT_CLASSIFIER = 1
## Classifier configs ##
# Add here #
# Example: SVM_KERNEL=radial, ... #
# Config here #
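# e.g. (illustrative placeholders only, not referenced elsewhere in this module):
# SVM_C = 1.0
# KNN_N_NEIGHBORS = 5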
CLASSIFIERS = {"SVM_LINEAR": svm.LinearSVC(),
               "NEAREST_CENTROID": NearestCentroid(metric=DISTANCE_CLASSIFICATION),
               "NAIVE_BAYES": GaussianNB(),
               "DECISSION_TREE": tree.DecisionTreeClassifier(),
               "PROJECT_DISTANCE_1": get_class,
               "PROJECT_DISTANCE_2": get_k_classes}
CLASSIFIER = CLASSIFIERS["NEAREST_CENTROID"]
####################
| gpl-2.0 |
jmargeta/scikit-learn | sklearn/linear_model/tests/test_base.py | 8 | 3585 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
"""
Test LinearRegression on a simple dataset.
"""
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
"""
Test assertions on betas shape.
"""
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
    clf.fit(X, Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
| bsd-3-clause |
jmargeta/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 4 | 1648 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
by manually adding non-linear features. Kernel methods extend this idea and can
induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# License: BSD Style.
import numpy as np
import pylab as pl
from sklearn.linear_model import Ridge
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
pl.plot(x_plot, f(x_plot), label="ground truth")
pl.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
ridge = Ridge()
ridge.fit(np.vander(x, degree + 1), y)
pl.plot(x_plot, ridge.predict(np.vander(x_plot, degree + 1)),
label="degree %d" % degree)
pl.legend(loc='lower left')
pl.show()
| bsd-3-clause |
raamana/pyradigm | pyradigm/tests/test_regress_dataset.py | 1 | 11957 | import os
import random
import sys
import numpy as np
sys.dont_write_bytecode = True
from pytest import raises, warns
from pyradigm.regress import RegressionDataset as RegrDataset
out_dir = '.'
num_targets = np.random.randint(2, 50)
target_sizes = np.random.randint(10, 100, num_targets)
num_features = np.random.randint(10, 100)
num_samplets = sum(target_sizes)
# randomizing target values also to avoid subtle bugs related to their value
# using random.sample, as np.random.randint does not guarantee unique values
target_set = random.sample(range(100), num_targets)
feat_names = np.array([str(x) for x in range(num_features)])
test_dataset = RegrDataset()
for target_index, target_id in enumerate(target_set):
for sub_ix in range(target_sizes[target_index]):
subj_id = '{}_S{:05d}'.format(target_id, sub_ix)
feat = np.random.random(num_features)
test_dataset.add_samplet(subj_id, feat, target_id,
feature_names=feat_names)
out_file = os.path.join(out_dir, 'random_example_dataset.pkl')
test_dataset.save(out_file)
# same IDs, new features
same_ids_new_feat = RegrDataset()
for sub_id in test_dataset.samplet_ids:
feat = np.random.random(num_features)
same_ids_new_feat.add_samplet(sub_id, feat,
test_dataset.targets[sub_id])
same_ids_new_feat.feature_names = np.array(['new_f{}'.format(x) for x in range(
num_features)])
test_dataset.description = 'test dataset'
print(test_dataset)
print('default format:\n {}'.format(test_dataset))
print('full repr :\n {:full}'.format(test_dataset))
print('string/short :\n {:s}'.format(test_dataset))
target_set, target_sizes = test_dataset.summarize()
reloaded_dataset = RegrDataset(dataset_path=out_file,
description='reloaded test_dataset')
copy_dataset = RegrDataset(in_dataset=test_dataset)
rand_index = np.random.randint(0, len(target_set), 1)[0]
random_target_name = target_set[rand_index]
random_target_ds = test_dataset.get_target(random_target_name)
other_targets_ds = test_dataset - random_target_ds
other_target_set = set(target_set) - set([random_target_name])
other_targets_get_with_list = test_dataset.get_target(other_target_set)
recombined = other_targets_ds + random_target_ds
empty_dataset = RegrDataset()
test2 = RegrDataset()
test3 = RegrDataset()
def test_empty():
assert not empty_dataset
def test_target_type():
rand_id = test_dataset.samplet_ids[np.random.randint(2, num_samplets)]
if not isinstance(test_dataset.targets[rand_id],
test_dataset._target_type):
raise TypeError('invalid target type for samplet id {}'.format(rand_id))
def test_num_targets():
assert test_dataset.num_targets == num_targets
def test_num_features():
assert test_dataset.num_features == num_features
def test_shape():
assert test_dataset.shape == (num_samplets, num_features)
def test_num_features_setter():
with raises(AttributeError):
test_dataset.num_features = 0
def test_num_samples():
assert test_dataset.num_samplets == sum(target_sizes)
def test_subtract():
assert other_targets_ds.num_samplets == sum(target_sizes) - target_sizes[
rand_index]
def test_get_target_list():
assert other_targets_ds == other_targets_get_with_list
def test_add():
a = other_targets_ds + random_target_ds
n = a.num_samplets
n1 = other_targets_ds.num_samplets
n2 = random_target_ds.num_samplets
assert n1 + n2 == n
assert set(a.samplet_ids) == set(
other_targets_ds.samplet_ids + random_target_ds.samplet_ids)
assert a.num_features == other_targets_ds.num_features == \
random_target_ds.num_features
assert all(a.feature_names == other_targets_ds.feature_names)
comb_ds = test_dataset + same_ids_new_feat
comb_names = np.concatenate([test_dataset.feature_names,
same_ids_new_feat.feature_names])
if not all(comb_ds.feature_names == comb_names):
raise ValueError('feature names were not carried forward in combining two '
'datasets with same IDs and different feature names!')
def test_set_existing_sample():
sid = test_dataset.samplet_ids[0]
new_feat = np.random.random(num_features)
with raises(KeyError):
test_dataset[sid + 'nonexisting'] = new_feat
with raises(ValueError):
test_dataset[sid] = new_feat[:-2] # diff dimensionality
test_dataset[sid] = new_feat
if not np.all(test_dataset[sid] == new_feat):
raise ValueError('Bug in replacing features for an existing sample!'
'Retrieved features do not match previously set features.')
def test_data_type():
for in_dtype in [np.float_, np.int, np.bool_]:
cds = RegrDataset(dtype=in_dtype)
cds.add_samplet('a', [1, 2.0, -434], 2)
if cds.dtype != in_dtype or cds['a'].dtype != in_dtype:
raise TypeError('Dataset not maintaining the features in the requested'
'dtype {}. They are in {}'.format(in_dtype, cds.dtype))
def test_cant_read_nonexisting_file():
with raises(IOError):
a = RegrDataset('/nonexistentrandomdir/disofddlsfj/arbitrary.noname.pkl')
def test_cant_write_to_nonexisting_dir():
with raises(IOError):
test_dataset.save('/nonexistentrandomdir/jdknvoindvi93/arbitrary.noname.pkl')
def test_invalid_constructor():
with raises(TypeError):
a = RegrDataset(
in_dataset='/nonexistentrandomdir/disofddlsfj/arbitrary.noname.pkl')
with raises(ValueError):
# data simply should not be a dict
b = RegrDataset(dataset_path=None, in_dataset=None, data=list())
with raises(ValueError):
c = RegrDataset(dataset_path=None,
in_dataset=None,
data=None,
targets='invalid_value')
def test_return_data_labels():
matrix, vec_labels, sub_ids = test_dataset.data_and_targets()
assert len(vec_labels) == len(sub_ids)
assert len(vec_labels) == matrix.shape[0]
def test_init_with_dict():
new_ds = RegrDataset(data=test_dataset.data,
targets=test_dataset.targets)
assert new_ds == test_dataset
# def test_labels_setter():
# fewer_labels = test_dataset.labels
# label_keys = list(fewer_labels.samplet_ids())
# fewer_labels.pop(label_keys[0])
#
# with raises(ValueError):
# test_dataset.labels = fewer_labels
#
# same_len_diff_key = fewer_labels
# same_len_diff_key[u'sldiursvdkvjs'] = 1
# with raises(ValueError):
# test_dataset.labels = same_len_diff_key
#
# # must be dict
# with raises(ValueError):
# test_dataset.labels = None
def test_targets_setter():
fewer_targets = test_dataset.targets
targets_keys = list(fewer_targets.keys())
fewer_targets.pop(targets_keys[0])
with raises(ValueError):
test_dataset.targets = fewer_targets
same_len_diff_key = fewer_targets
same_len_diff_key['sldiursvdkvjs'] = 'lfjd'
with raises(ValueError):
test_dataset.targets = same_len_diff_key
def test_feat_names_setter():
# fewer
with raises(ValueError):
test_dataset.feature_names = feat_names[0:test_dataset.num_features - 2]
# too many
with raises(ValueError):
test_dataset.feature_names = np.append(feat_names, 'blahblah')
def test_add_existing_id():
sid = test_dataset.samplet_ids[0]
with raises(ValueError):
test_dataset.add_samplet(sid, None, None)
def test_add_new_id_diff_dim():
new_id = 'dsfdkfslj38748937439kdshfkjhf38'
sid = test_dataset.samplet_ids[0]
data_diff_dim = np.random.rand(test_dataset.num_features + 1, 1)
with raises(ValueError):
test_dataset.add_samplet(new_id, data_diff_dim, None, None)
def test_del_nonexisting_id():
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with warns(UserWarning):
test_dataset.del_samplet(nonexisting_id)
def test_get_nonexisting_class():
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with raises(ValueError):
test_dataset.get_target(nonexisting_id)
def test_rand_feat_subset():
nf = copy_dataset.num_features
subset_len = np.random.randint(1, nf)
subset = np.random.randint(1, nf, size=subset_len)
subds = copy_dataset.get_feature_subset(subset)
assert subds.num_features == subset_len
def test_eq_self():
assert test_dataset == test_dataset
def test_eq_copy():
new_copy = RegrDataset(in_dataset=copy_dataset)
assert new_copy == copy_dataset
def test_unpickling():
out_file = os.path.join(out_dir, 'random_pickled_dataset.pkl')
copy_dataset.save(out_file)
reloaded_dataset = RegrDataset(dataset_path=out_file,
description='reloaded test_dataset')
assert copy_dataset == reloaded_dataset
def test_subset_class():
assert random_target_ds.num_samplets == target_sizes[rand_index]
def test_get_subset():
assert random_target_ds == reloaded_dataset.get_target(random_target_name)
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with warns(UserWarning):
test_dataset.get_subset(nonexisting_id)
def test_membership():
rand_idx = np.random.randint(0, test_dataset.num_samplets)
member = test_dataset.samplet_ids[rand_idx]
not_member = u'sdfdkshfdsk34823058wdkfhd83hifnalwe8fh8t'
assert member in test_dataset
assert not_member not in test_dataset
def rand_ints_range(n, k):
return np.random.random_integers(1, n, min(n, k))
def test_glance():
for k in np.random.randint(1, test_dataset.num_samplets - 1, 10):
glanced_subset = test_dataset.glance(k)
assert len(glanced_subset) == k
def test_random_subset():
for perc in np.arange(0.1, 1, 0.2):
subset = copy_dataset.random_subset(perc=perc)
# NOT separating the calculation by target to avoid multiple rounding errors
expected_size = np.int64(np.floor(num_samplets * perc))
assert subset.num_samplets == expected_size
def test_random_subset_by_count():
smallest_size = min(target_sizes)
for count in np.random.randint(1, smallest_size, 7):
subset = copy_dataset.random_subset_ids_by_count(count=count)
assert len(subset) == count
def test_train_test_split_ids_count():
smallest_size = min(target_sizes)
for count in np.random.randint(1, smallest_size, 7):
subset_train, subset_test = copy_dataset.train_test_split_ids(count=count)
assert len(subset_train) == count
assert len(subset_test) == copy_dataset.num_samplets - count
assert len(set(subset_train).intersection(subset_test)) == 0
with raises(ValueError):
copy_dataset.train_test_split_ids(count=-1)
with raises(ValueError):
copy_dataset.train_test_split_ids(count=copy_dataset.num_samplets + 1.0)
with raises(ValueError):
# both cant be specified at the same time
copy_dataset.train_test_split_ids(count=2, train_perc=0.5)
def test_train_test_split_ids_perc():
for perc in np.arange(0.25, 1.0, 0.1):
subset_train, subset_test = copy_dataset.train_test_split_ids(
train_perc=perc)
expected_train_size = np.floor(num_samplets * perc)
assert len(subset_train) == expected_train_size
assert len(subset_test) == copy_dataset.num_samplets - expected_train_size
assert len(set(subset_train).intersection(subset_test)) == 0
with raises(ValueError):
subset_train, subset_test = copy_dataset.train_test_split_ids(
train_perc=0.00001)
with raises(ValueError):
copy_dataset.train_test_split_ids(train_perc=1.1)
with raises(ValueError):
copy_dataset.train_test_split_ids(train_perc=-1)
test_random_subset_by_count()
| mit |
Akshay0724/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
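# (dist_func above is used later as a custom "pyfunc" metric; for p=2 it reduces
# to the Euclidean distance, which is what test_pyfunc_metric compares it against.)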
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
    # Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
| bsd-3-clause |
Huangtuzhi/AlibabaRecommand | ObtainPredict.py | 3 | 5686 | #!/usr/bin/python env
# -*- coding: utf-8 -*-
import numpy as np
from sklearn.linear_model import LogisticRegression
__author__ = 'Huang yi'
class PredictEmption(object):
def __init__(self):
pass
def DivideSet(self):
        # First load: rewrite the txt files into numpy .npy format (this conversion only needs to run once).
train_data = np.loadtxt('data_features.txt')
train_data_label = np.loadtxt('data_labels.txt')
np.save('data_features.npy', train_data)
np.save('data_labels.npy', train_data_label)
fop1 = open('feature_pos.txt', 'w+')
fop2 = open('feature_neg.txt', 'w+')
train_data = np.load('data_features.npy')
train_data_label = np.load('data_labels.npy')
label_size = train_data_label.size
        # transpose the labels
train_data_label.shape = (1, label_size)
train_data_label_transpose = np.transpose(train_data_label)
        # append the label to the end of each feature row
line = data_with_label = np.hstack((train_data, train_data_label_transpose))
        # write positive and negative examples separately to feature_pos.txt and feature_neg.txt
for i in range(0, label_size):
if(line[i][-1]):
fop1.write('%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d' %
(line[i][0],line[i][1],line[i][2],line[i][3],line[i][4],line[i][5],line[i][6],line[i][7],line[i][8],line[i][9],
line[i][10],line[i][11],line[i][12],line[i][13],line[i][14],line[i][15],line[i][16],line[i][17],line[i][18],line[i][19]))
fop1.write('\n')
else:
fop2.write('%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d' %
(line[i][0],line[i][1],line[i][2],line[i][3],line[i][4],line[i][5],line[i][6],line[i][7],line[i][8],line[i][9],
line[i][10],line[i][11],line[i][12],line[i][13],line[i][14],line[i][15],line[i][16],line[i][17],line[i][18],line[i][19]))
fop2.write('\n')
fop1.close()
fop2.close()
feature_pos = np.loadtxt('feature_pos.txt')
feature_neg = np.loadtxt('feature_neg.txt')
np.save('feature_p.npy', feature_pos)
np.save('feature_n.npy', feature_neg)
def GenTrainTestSet(self):
feature_pos = np.load('feature_p.npy')
feature_neg = np.load('feature_n.npy')
        # shuffle to randomize the sample order
np.random.shuffle(feature_pos)
np.random.shuffle(feature_neg)
TrainSet = np.vstack((feature_pos[0:800], feature_neg[0:500000]))
TestSet = np.vstack((feature_pos[800:999], feature_neg[500000:750000]))
np.save('TrainSet.npy', TrainSet)
np.save('TestSet.npy', TestSet)
def TrainAndPredict(self, train_data, train_data_label, predict_data):
        # use logistic regression for the prediction
LR = LogisticRegression()
LR.fit(train_data, train_data_label)
predict_labels = LR.predict(predict_data)
        # column 0 is the probability of not buying, column 1 the probability of buying
predict_proba = LR.predict_proba(predict_data)[:, -1]
return predict_labels, predict_proba
def GetF1(self, predict_list, real_list):
_list = [x for x in predict_list if x in real_list]
equal_num = len(_list)
prediction_set_num = len(predict_list)
reference_set_num = len(real_list)
precision = float(equal_num) / prediction_set_num
recall = float(equal_num) / reference_set_num
f1 = float(2 * precision * recall) / (precision + recall)
return precision, recall, f1
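        # e.g. GetF1([1, 2, 3], [2, 3, 4]) gives precision = 2/3, recall = 2/3 and
        # f1 = 2/3, since two of the three predicted indices are in the reference set.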
    # evaluate on the 30-day TrainSet/TestSet to pick the best threshold
def TestPredict(self):
train_set = np.load('TrainSet.npy')
test_set = np.load('TestSet.npy')
        train_data = train_set[:, 0:-1]  # feature columns, label removed
train_data_label = train_set[:, -1] # label
        test_data = test_set[:, 0:-1]  # label column not needed
test_data_label = test_set[:, -1] # label
predict_labels, predict_proba = self.TrainAndPredict(train_data, train_data_label, test_data)
        # search for the best threshold setting
# best_th = 0.014
for i in range(0, 2):
rough_set = float(i) / 10
for j in range(0, 100):
tiny_set = float(j)/1000
th = rough_set + tiny_set
                # list of predicted purchases
index = np.array(range(0, len(predict_proba)))
                # indices where the probability exceeds the threshold
index_predict = index[predict_proba > th]
index_predict = list(index_predict)
                # list of actual purchases
index_real = index[test_data_label > 0]
precision, recall, f1 = self.GetF1(index_predict, index_real)
print 'TH %s : %s %s %s | %s %s' % (th, precision, recall, f1, len(index_predict), len(index_real))
# index = np.array(range(0, len(predict_proba)))
        # # indices where the probability exceeds best_th
# index_predict = index[predict_proba > best_th]
# index_predict = list(index_predict)
        # # list of actual purchases
# index_real = index[test_data_label > 0]
# precision, recall, f1 = self.GetF1(index_predict, index_real)
# print 'TH %s : %s %s %s' % (best_th, precision, recall, f1)
if __name__ == '__main__':
# predict_data = np.load('predict_data.txt')
# PE.TrainAndPredict(train_data, train_data_label, predict_data)
# LR = LogisticRegression()
# ret = LR.fit(train_data, train_data_label)
# predict_labels = LR.predict(train_data[3000:3005])
PE = PredictEmption()
# PE.DivideSet()
# PE.GenTrainTestSet()
PE.TestPredict() | mit |
jmargeta/scikit-learn | sklearn/decomposition/tests/test_fastica.py | 5 | 5595 | """
Test the fastica algorithm.
"""
import warnings
import itertools
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
"""
Test gram schmidt orthonormalization
"""
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, 3 * x ** 2
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
ica.fit(m.T)
ica.get_mixing_matrix()
assert_true(ica.components_.shape == (2, 2))
assert_true(ica.sources_.shape == (1000, 2))
ica = FastICA(fun=np.tanh, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
ica = FastICA(whiten=False, random_state=0)
ica.fit(m)
ica.get_mixing_matrix()
# test for issue #697
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ica = FastICA(n_components=1, whiten=False, random_state=0)
ica.fit(m) # should raise warning
assert_true(len(w) == 1) # 1 warning should be raised
def test_non_square_fastica(add_noise=False):
""" Test the FastICA algorithm on very simple data.
"""
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/datasets/__init__.py | 59 | 3734 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
jakevdp/sklearn_pycon2015 | notebooks/fig_code/helpers.py | 75 | 2301 | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.xlabel('sepal length (cm)')
pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)')
| bsd-3-clause |
abinit/abinit | scripts/post_processing/AbinitBandStructureMaker.py | 1 | 64037 | #===================================
program = 'AbinitBandStructureMaker.py'
version = '1.3'
#===================================
# last modified : november 16 2010
# written by Benjamin Tardif
# [email protected]
# Modified by Paul Boulanger
# 1.3 : Converted to numpy
# : corrected a bug related to termination of the segments (line 114)
# : defined a second angletol to distinguish between specialkpt detection and segment construction
#===================================
# ********* no longer used or maintained *******
#=====================================================================================================================================================================
#IMPORTS
import os
import re
import sys
import time
from numpy import *
#from Numeric import *
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#VARIABLES
class VariableContainer:pass
#default variables
default = VariableContainer()
default.setupfilename = '.%s_setup' %program[:-3]
default.launchcommand = 'xmgrace'
default.autolaunch = 'no'
default.energyshift = 'yes'
default.valbandcolor = 'blue'
default.conbandcolor = 'red'
default.bandlinewidth = 1
default.fermilinewidth = 1
default.separatorlinewidth = 1
default.emptyspacewidth = 10
#control variables
ctrl = VariableContainer()
ctrl.arglist = sys.argv[1:] # list of strings containing all the input arguments in the command line
# list of valid keywords which can be used in the command line
ctrl.validkeywords = ['-setautolaunch','-setenergyshift','-setlinecolor','-setlinewidth','-setspacewidth','-setup','-setdefault','-debug']
ctrl.debugmode = False # True if debug mode is activated (by adding keyword "-debug" in the command line)
ctrl.launchcommand = str() # string containing the xmgrace launch command
ctrl.autolaunch = str() # 'yes' or 'no', indicating if xmgrace will be automatically launched each time a .agr file is created
ctrl.energyshift = str() # 'yes' or 'no', indicating if the energies will be shifted to bring the fermi energy to zero
ctrl.valbandcolor = str()
ctrl.conbandcolor = str()
ctrl.bandlinewidth = int()
ctrl.fermilinewidth = int()
ctrl.separatorlinewidth = int()
ctrl.emptyspacewidth = int()
# dictionary maping color name with color number in xmgrace
ctrl.xmgracecolor = {
'white' : 0,
'black' : 1,
'red' : 2,
'green' : 3,
'blue' : 4,
'yellow' : 5,
'brown' : 6,
'grey' : 7,
'violet' : 8,
'cyan' : 9,
'magenta' :10,
'orange' :11,
'indigo' :12,
'maroon' :13,
'turquoise':14,
'green4' :15}
ctrl.filename = str() # name of the file entered (*.out or *.dbs)
ctrl.filetype = str() # 'out' or 'dbs' according to the type of the file entered
ctrl.filedata = list() # list of strings each containing one line of the file entered
ctrl.dbsfilename = str() # name of the file produced (*.dbs)
ctrl.agrfilename = str() # name of the file produced (*.agr)
ctrl.angletol = 1 # maximum angle between 2 k-points under which they will be considered being in the same direction
ctrl.angletol2 = 0.1
ctrl.bandstructurescheme = str() # scheme of the type '{lA}-{lB}-{lC}' corresponding to the band structure to be plotted
# each {} corresponds to the name given to a special k-point
# {} 1st character : 'l' for 'letter' or 's' for 'symbol'
# {} 2nd character : a letter which combined with the 1st character will give a caption associated with this special k-point
# '-' between {} indicates that a band structure must be plotted between the 2 corresponding k-points
# ' ' (empty space) between {} indicates that an empty space must be inserted in the band structure
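# example (illustrative) : '{lA}-{lB} {lB}-{lC}' plots a segment from point A to point B, then an empty space, then a segment from B to C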
ctrl.dicospecialkpt = dict() # dictionary mapping keys=[vectors] to values={lx}
ctrl.segmentcaptionlist = list() # list of lists of string, each list having the form
# [{lA}, {lB}] for a segment joining 2 points
# ['empty space'] for a empty space in the band structure
ctrl.spacepercent = 10 # total percentage (in % units) of the graph to be occupied by empty spaces, if any
ctrl.segmentcartlength = array([]) # array containing the cartesian length (in bohrs) of each segment of the band structure
ctrl.segmentrellength = array([]) # array containing the relative length (dimensionless) of each segment of the band structure
ctrl.dicoxkpt = {} #dictionary linking x to kpt (array)
ctrl.captiontick = list()
ctrl.X = array([])
ctrl.Y = list() # list of array(nband,Float)
ctrl.nvalenceband = int()
ctrl.bandgap = float()
ctrl.hartree_to_eV = float(27.2113845) #eV/hartree
ctrl.ndataset = int() # number of datasets found in the .out file
ctrl.datasetlocation = list() # list containing the starting and ending line index of each dataset
ctrl.useddataset = list() # list containing the number of used dataset
ctrl.databasekey = list() # list of list containing the informations describing the calculation parameters used to generate the k-point database
ctrl.alphabet={
1:'A', 2:'B', 3:'C', 4:'D', 5:'E', 6:'F', 7:'G', 8:'H', 9:'I',10:'J',11:'K',12:'L',13:'M',
14:'N',15:'O',16:'P',17:'Q',18:'R',19:'S',20:'T',21:'U',22:'V',23:'W',24:'X',25:'Y',26:'Z'}
#data variables
data = VariableContainer()
data.nband = int()
data.kpt = list() # list of array(3,Float)
data.energy = list() # list of array(data.nband,Float)
data.specialkpt = list() # list of array(3,Float)
data.G = list() # list of array(3,Float) reciprocal vectors
data.fermienergy = float() #fermi energy, in eV
data.typat = list() # list containing the type of each atom
data.units = str() # 'eV' or 'hartree'
#graph variables
graph = VariableContainer()
graph.title = str() # title of the graph
graph.worldymin = float()
graph.worldymax = float()
#feedback variables
feedback = VariableContainer()
feedback.feedback = False
feedback.filename = '.%s_feedback' %program[:-3]
feedback.ntimeused = int()
feedback.email = '[email protected]'
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#METHODS : general
def header(program,version):
# type(program) = string
# type(version) = string
# returns a header to be printed in the shell each time the program is launched
L = len(program)+len(version)+9+2
line = L*'='
header = '\n%s\n %s version %s\n%s' %(line,program,version,line)
return header
def detectfile(filename,path='.'):
# type(filename) = string
# type(path) = string
# returns True if the given file is found in the specified path
if filename in os.listdir(path):
return True
else:
return False
def floatable(x):
# type(x) = string, int or float
# returns True if given x can be converted to a float
try:
float(x)
return True
except:
return False
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#METHODS : list manipulation
def clean(list):
# type(list) = list of strings (usually obtained with the ".readlines()" method)
# removes "\n" and "\r" and empty lines from given list
L = len(list)
for i in range(L):
list[L-1-i] = list[L-1-i].replace('\n','')
list[L-1-i] = list[L-1-i].replace('\r','')
if list[L-1-i].split() == []:
list.pop(L-1-i)
def clean2(list):
# type(list) = list of strings (usually obtained with the ".readlines()" method)
# removes "\n" and "\r" from given list and replaces empty lines by "#"
L = len(list)
for i in range(L):
list[L-1-i] = list[L-1-i].replace('\n','')
list[L-1-i] = list[L-1-i].replace('\r','')
if list[L-1-i].split() == []:
list[L-1-i] = "#"
def rmrepetitions(list,pairedlist=None):
    # type(list) = any list with all elements of the same length
    # removes repeated entries in the list, keeping only the first occurrence
    # (if a paired list is specified, data removed from list will be removed from pairedlist too)
    # example : rmrepetitions([1,2,2,3,4,4,4,8,7,6,7]) = [1,2,3,4,8,7,6]
L = len(list)
try:s = len(list[0])
except:s = 1
i = 0
while i < len(list)-1:
j = i+1
while j < len(list):
if sum(list[j] == list[i]) == s:
list.pop(j)
if pairedlist:pairedlist.pop(j)
j-=1
j+=1
i+=1
def rmsuccessiverepetitions(list,pairedlist=None):
    # type(list) = any list with all elements of the same length
    # removes repeated successive entries in the list, keeping only the first occurrence.
    # (if a paired list is specified, data removed from list will be removed from pairedlist too)
    # example : rmsuccessiverepetitions([1,2,2,3,4,4,4,1,2,3,3]) = [1,2,3,4,1,2,3]
L = len(list)
try:s = len(list[0])
except:s = 1
i = 0
while i < len(list)-1:
j = i+1
if sum(list[j] == list[i]) == s:
list.pop(j)
if pairedlist:pairedlist.pop(j)
i-=1
i+=1
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#METHODS : vector operations
def norm(vector):
# type(vector) = array(3,Float)
# returns the norm of a vector
x = vector[0]
y = vector[1]
z = vector[2]
norm = (x**2+y**2+z**2)**0.5
return norm
def angle(vector1,vector2):
# type(vector1) = array(3,Float)
# type(vector2) = array(3,Float)
# returns the angle (in degree) between the two vectors
arg = dot(vector1,vector2)/norm(vector1)/norm(vector2)
if arg > 1:arg= 1
if arg < -1:arg=-1
theta = (arccos(arg))/pi*180
return theta
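    # e.g. angle(array([1.,0.,0.]), array([0.,1.,0.])) returns 90.0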
def kpt_red_to_cart(kptred,primvectors):
# type(kptred) = array(3,Float) representing the reduced coordinates of a k-point
# type(primvectors) = a list of 3 array(3,Float) each representing a primitive vector in cartesian coordinates
# returns an array(3,Float) containing the coordinates of the given k-point in cartesian coordinates
kptcart = kptred[0]*primvectors[0] + kptred[1]*primvectors[1] + kptred[2]*primvectors[2]
return kptcart
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#METHODS : setup system
def writesetupfile(setupfilename=default.setupfilename,path=sys.path[0],\
launchcommand=default.launchcommand,autolaunch=default.autolaunch,\
energyshift=default.energyshift,\
valbandcolor=default.valbandcolor,conbandcolor=default.conbandcolor,\
bandlinewidth=default.bandlinewidth,fermilinewidth=default.fermilinewidth,separatorlinewidth=default.separatorlinewidth,\
emptyspacewidth=default.emptyspacewidth):
writer = open(sys.path[0]+'/'+setupfilename,'w')
writer.write('----------------------------------------------------------------------------')
writer.write('\n this file is used by the program %s (version %s)' %(program,version))
writer.write('\n (this file is not essential and can be deleted if needed)')
writer.write('\n----------------------------------------------------------------------------')
writer.write('\n\n============================================================================')
writer.write('\n-setautolaunch')
writer.write('\n\nXMGRACE LAUNCH COMMAND:\n%s' %launchcommand)
writer.write('\n\nXMGRACE AUTOLAUNCH:\n%s' %autolaunch)
writer.write('\n============================================================================')
writer.write('\n\n============================================================================')
writer.write('\n-setenergyshift')
writer.write('\n\nSHIFT FERMI ENERGY TO ZERO:\n%s' %energyshift)
writer.write('\n============================================================================')
writer.write('\n\n============================================================================')
writer.write('\n-setlinecolor')
writer.write('\n\nVALENCE BANDS COLOR:\n%s' %valbandcolor)
writer.write('\n\nCONDUCTION BANDS COLOR:\n%s' %conbandcolor)
writer.write('\n============================================================================')
writer.write('\n\n============================================================================')
writer.write('\n-setlinewidth')
writer.write('\n\nBAND LINES WIDTH:\n%s' %bandlinewidth)
writer.write('\n\nFERMI ENERGY LINE WIDTH:\n%s' %fermilinewidth)
writer.write('\n\nSEPARATOR LINES WIDTH:\n%s' %separatorlinewidth)
writer.write('\n============================================================================')
writer.write('\n\n============================================================================')
writer.write('\n-setspacewidth')
writer.write('\n\nEMPTY SPACE(S) WIDTH PERCENTAGE:\n%s' %emptyspacewidth)
writer.write('\n============================================================================')
writer.close()
def setupfilecompatibility(oldsetupfile,newsetupfile):
reader = open(sys.path[0]+'/'+default.setupfilename,'r')
setup1 = reader.readlines()
reader.close()
reader = open(sys.path[0]+'/'+default.setupfilename+'2','r')
setup2 = reader.readlines()
reader.close()
if len(setup1) == len(setup2):
i = 1 #skip the first three lines of the file
while i < len(setup1)-1:
i+=1
if setup1[i] != setup2[i]: return False
if ':' in setup1[i]: i+=1
return True
else:
return False
def getsettings():
# --> ctrl.launchcommand,ctrl.autolaunch,ctrl.energyshift,ctrl.valbandcolor,ctrl.conbandcolor,ctrl.bandlinewidth,ctrl.fermilinewidth,ctrl.emptyspacewidth
reader = open(sys.path[0]+'/'+default.setupfilename,'r')
setupfile = reader.readlines()
reader.close()
clean2(setupfile)
for i in range(len(setupfile)):
if setupfile[i] == 'XMGRACE LAUNCH COMMAND:':
ctrl.launchcommand = setupfile[i+1]
elif setupfile[i] == 'XMGRACE AUTOLAUNCH:':
ctrl.autolaunch = setupfile[i+1]
elif setupfile[i] == 'SHIFT FERMI ENERGY TO ZERO:':
ctrl.energyshift = setupfile[i+1]
elif setupfile[i] == 'VALENCE BANDS COLOR:':
ctrl.valbandcolor = setupfile[i+1]
elif setupfile[i] == 'CONDUCTION BANDS COLOR:':
ctrl.conbandcolor = setupfile[i+1]
elif setupfile[i] == 'BAND LINES WIDTH:':
ctrl.bandlinewidth = setupfile[i+1]
elif setupfile[i] == 'FERMI ENERGY LINE WIDTH:':
ctrl.fermilinewidth = setupfile[i+1]
elif setupfile[i] == 'SEPARATOR LINES WIDTH:':
ctrl.separatorlinewidth = setupfile[i+1]
elif setupfile[i] == 'EMPTY SPACE(S) WIDTH PERCENTAGE:':
ctrl.emptyspacewidth = setupfile[i+1]
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#METHODS : feedback system
def feedbackbugged(jobtype='?'):
if detectfile(feedback.filename,sys.path[0]) == False:
#feedback file does not exists, create a default one
writer = open(sys.path[0]+'/'+feedback.filename,'w')
writer.write('times used / date used / version used / job type / job status / {crash reason}\n')
writer.write('\n%s\t%s\t%s\t%s\t%s' %(feedback.ntimeused+1,time.ctime(),version,jobtype,'BUGGED'))
writer.close()
else:
#feedback file already exist, update it
reader = open(sys.path[0]+'/'+feedback.filename,'r')
filedata = reader.readlines()
reader.close()
writer = open(sys.path[0]+'/'+feedback.filename,'w')
if jobtype =='?':
#this is the first time the method is being called
feedback.ntimeused = int(filedata[-1].split()[0])
writer.writelines(filedata)
writer.write('\n%s\t%s\t%s\t%s\t%s' %(feedback.ntimeused+1,time.ctime(),version,jobtype,'BUGGED'))
writer.close()
else:
#this is not the first time the method is being called
filedata.pop(-1)
writer.writelines(filedata)
writer.write('%s\t%s\t%s\t%s\t%s' %(feedback.ntimeused+1,time.ctime(),version,jobtype,'BUGGED'))
def feedbackcrashed(jobtype,reason):
reader = open(sys.path[0]+'/'+feedback.filename,'r')
filedata = reader.readlines()
reader.close()
filedata.pop(-1)
writer = open(sys.path[0]+'/'+feedback.filename,'w')
writer.writelines(filedata)
writer.write('%s\t%s\t%s\t%s\t%s\t\t--> %s' %(feedback.ntimeused+1,time.ctime(),version,jobtype,'CRASHED',reason))
writer.close()
try:
os.system('mail -s "%s feedback #%s" %s < %s' %(program,feedback.ntimeused+1,feedback.email,sys.path[0]+'/'+feedback.filename))
except:
pass
def feedbackcompleted(jobtype):
reader = open(sys.path[0]+'/'+feedback.filename,'r')
filedata = reader.readlines()
reader.close()
filedata.pop(-1)
writer = open(sys.path[0]+'/'+feedback.filename,'w')
writer.writelines(filedata)
writer.write('%s\t%s\t%s\t%s\t%s' %(feedback.ntimeused+1,time.ctime(),version,jobtype,'COMPLETED'))
writer.close()
try:
os.system('mail -s "%s feedback #%s" %s < %s' %(program,feedback.ntimeused+1,feedback.email,sys.path[0]+'/'+feedback.filename))
except:
pass
#=====================================================================================================================================================================
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------
#MAIN
print header(program,version)
if feedback.feedback==True:feedbackbugged()
#=====================================================================================================================================================================
#SETUP FILE
if detectfile(default.setupfilename,sys.path[0]) == False:
#setup file not found, create a default one
writesetupfile()
else:
#a setup file already exists
reader = open(sys.path[0]+'/'+default.setupfilename,'r')
setupfile = reader.readlines()
reader.close()
clean2(setupfile)
checkversion = setupfile[1].split()[-1].split(')')[0]
#update the setup file if the checkversion is different from the version
if checkversion != version:
print '\n- WARNING -\nnew version detected\n%s was upgraded from version %s to version %s' %(program,checkversion,version)
writesetupfile(default.setupfilename+'2')
if setupfilecompatibility(default.setupfilename,default.setupfilename+'2')==True:
getsettings()
writesetupfile(setupfilename=default.setupfilename,path=sys.path[0],\
launchcommand=ctrl.launchcommand,autolaunch=ctrl.autolaunch,\
energyshift=ctrl.energyshift,\
valbandcolor=ctrl.valbandcolor,conbandcolor=ctrl.conbandcolor,\
bandlinewidth=ctrl.bandlinewidth,fermilinewidth=ctrl.fermilinewidth,separatorlinewidth=ctrl.separatorlinewidth,\
emptyspacewidth=ctrl.emptyspacewidth)
else:
print '\n- WARNING -\nthe setup file format has changed since your last version\nall settings restored to default\nyou may have to reset some of your previous settings'
writesetupfile()
os.system('rm -f %s/%s' %(sys.path[0],default.setupfilename+'2'))
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#COMMAND LINE
#abort if a keyword is not valid
for arg in ctrl.arglist:
if arg[0] == '-':
#a keyword is found
if arg not in ctrl.validkeywords:
#the keyword is not valid
print '\n- ERROR -\n%s is not a valid keyword' %arg
validkeywords = str()
for keyword in ctrl.validkeywords:
validkeywords+=keyword+', '
validkeywords = validkeywords[:-2]
print '\nvalid keywords are :\n%s\n' %validkeywords
if feedback.feedback==True:feedbackcrashed('?','invalid keyword')
sys.exit()
#abort if a keyword is repeated
for keyword in ctrl.validkeywords:
if ctrl.arglist.count(keyword) > 1:
print '\n- ERROR -\nkeyword %s is repeated %s times\n' %(keyword,ctrl.arglist.count(keyword))
if feedback.feedback==True:feedbackcrashed('?','repeated keyword')
sys.exit()
#get keywords
setautolaunch = False
setenergyshift = False
setlinecolor = False
setlinewidth = False
setspacewidth = False
setup= False
setdefault = False
if '-setautolaunch' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setautolaunch'))
setautolaunch = True
if '-setenergyshift' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setenergyshift'))
setenergyshift = True
if '-setlinecolor' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setlinecolor'))
setlinecolor = True
if '-setlinewidth' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setlinewidth'))
setlinewidth = True
if '-setspacewidth' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setspacewidth'))
setspacewidth = True
if '-setup' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setup'))
setautolaunch = True
setenergyshift = True
setlinecolor = True
setlinewidth = True
setspacewidth = True
if '-setdefault' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-setdefault'))
setdefault = True
if '-debug' in ctrl.arglist:
ctrl.arglist.pop(ctrl.arglist.index('-debug'))
ctrl.debugmode = True
#(put additional keywords here)
#SETUP MODE
if setdefault==True:
#create a default setup file
print '\n--> starting SETUP MODE'
if feedback.feedback==True:feedbackbugged('SETUP')
writesetupfile()
print '\ndefault setup restored'
print '\n--> leaving SETUP MODE\n'
if feedback.feedback == True:feedbackcompleted('SETUP')
sys.exit()
getsettings()
if setautolaunch+setenergyshift+setlinecolor+setlinewidth+setspacewidth+setup!=0:
print '\n--> starting SETUP MODE'
if feedback.feedback==True:feedbackbugged('SETUP')
if setautolaunch==True:
#change launchcommand --> ctrl.launchcommand
print '\ncurrent xmgrace launch command is :\n%s\n' %ctrl.launchcommand
answer = str()
while answer not in ['yes','no']:
answer = raw_input('do you wish to change it (yes ; no) ? ')
if answer == 'yes':
ctrl.launchcommand = raw_input('\nenter the new xmgrace launch command :\n')
#change autolaunch --> ctrl.autolaunch
ctrl.autolaunch = raw_input('\nautomatically launch xmgrace each time a .agr file is created (yes ; no) ? ')
while ctrl.autolaunch not in ['yes','no']:
ctrl.autolaunch = raw_input('automatically launch xmgrace each time a .agr file is created (yes ; no) ? ')
if setenergyshift==True:
#change energy shift --> ctrl.energyshift
ctrl.energyshift = raw_input('\nshift energy eigenvalues to bring the fermi energy to zero (yes ; no) ? ')
while ctrl.energyshift not in ['yes','no']:
ctrl.energyshift = raw_input('shift energy eigenvalues to bring the fermi energy to zero (yes ; no) ? ')
if setlinecolor==True:
#change valence bands color --> ctrl.valbandcolor
ctrl.valbandcolor = raw_input('\nChoose the color of the valence bands : ')
while ctrl.valbandcolor not in ctrl.xmgracecolor.keys():
colors = str()
for color in ctrl.xmgracecolor.keys():
colors += '%s, ' %color
colors = colors[:-2]
print '\n- invalid entry -\npossible answers are :\n%s' %colors
ctrl.valbandcolor = raw_input('\nChoose the color of the valence bands : ')
#change conduction bands color --> ctrl.conbandcolor
ctrl.conbandcolor = raw_input('\nChoose the color of the conduction bands : ')
while ctrl.conbandcolor not in ctrl.xmgracecolor.keys():
colors = str()
for color in ctrl.xmgracecolor.keys():
colors += '%s, ' %color
colors = colors[:-2]
print '\n- invalid entry -\npossible answers are :\n%s' %colors
ctrl.conbandcolor = raw_input('\nChoose the color of the conduction bands : ')
if setlinewidth==True:
#change band lines width --> ctrl.bandlinewidth
ctrl.bandlinewidth = raw_input('\nChoose the width of the band lines : ')
while floatable(ctrl.bandlinewidth) == False:
ctrl.bandlinewidth = raw_input('Choose the width of the band lines : ')
#change fermi energy line width --> ctrl.fermilinewidth
ctrl.fermilinewidth = raw_input('\nChoose the width of the fermi energy line : ')
while floatable(ctrl.fermilinewidth) == False:
ctrl.fermilinewidth = raw_input('Choose the width of the fermi energy line : ')
#change separator lines width --> ctrl.separatorlinewidth
ctrl.separatorlinewidth = raw_input('\nChoose the width of the separator lines : ')
while floatable(ctrl.separatorlinewidth) == False:
ctrl.separatorlinewidth = raw_input('Choose the width of the separator lines : ')
if setspacewidth==True:
#change empty space(s) width --> ctrl.emptyspacewidth
ctrl.emptyspacewidth = raw_input('\nChoose the total width (in percentage) of the empty space(s) on the graph, if any : ')
while floatable(ctrl.emptyspacewidth) == False:
ctrl.emptyspacewidth = raw_input('Choose the total width (in percentage) of the empty space(s) on the graph, if any : ')
#overwrite setup file
writesetupfile(setupfilename=default.setupfilename,path=sys.path[0],\
launchcommand=ctrl.launchcommand,autolaunch=ctrl.autolaunch,\
energyshift=ctrl.energyshift,\
valbandcolor=ctrl.valbandcolor,conbandcolor=ctrl.conbandcolor,\
bandlinewidth=ctrl.bandlinewidth,fermilinewidth=ctrl.fermilinewidth,separatorlinewidth=ctrl.separatorlinewidth,\
emptyspacewidth=ctrl.emptyspacewidth)
print '\n--> leaving SETUP MODE\n'
if feedback.feedback==True:feedbackcompleted('SETUP')
sys.exit()
#get the filename --> ctrl.filename
if len(ctrl.arglist) == 0:
#user entered no filename in the command line
ctrl.filename = raw_input('\nEnter the filename : \n')
elif len(ctrl.arglist) == 1:
#user entered the filename in the command line
ctrl.filename = ctrl.arglist[0]
elif len(ctrl.arglist) > 1:
#user entered too many arguments in the command line
print '\n- ERROR -\ntoo many arguments entered in the command line\n'
if feedback.feedback==True:feedbackcrashed('?','too many arguments entered')
sys.exit()
#compute file type --> ctrl.filetype
#abort if the file type is not valid
if ctrl.filename.split('.')[-1][:3] == 'out':
ctrl.filetype = 'out'
elif ctrl.filename.split('.')[-1][:3] == 'dbs':
ctrl.filetype = 'dbs'
else:
print '\n- ERROR -\ninvalid file type (must be .out or .dbs)\n'
if feedback.feedback==True:feedbackcrashed('?','invalid filetype')
sys.exit()
#abort if the file does not exist
if detectfile(ctrl.filename,'.') == False:
print '\n- ERROR -\n"%s" file not found\n' %ctrl.filename
if feedback.feedback==True:feedbackcrashed('?','file not found')
sys.exit()
#activate debugmode, if needed
if ctrl.debugmode==True:
print '\n--> DEBUG MODE'
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#READ THE FILE
#read file and acquire data --> ctrl.filedata
if ctrl.debugmode==True:print '\n--> file "%s" will be treated as a < %s > file' %(ctrl.filename,ctrl.filetype)
reader = open(ctrl.filename,"r")
ctrl.filedata = reader.readlines()
reader.close()
if ctrl.debugmode==True:print '\n--> file read successfully\n %s line(s) read' %len(ctrl.filedata)
clean2(ctrl.filedata)
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#EXTRACT DATA FROM THE FILE
if ctrl.filetype == 'out':
if feedback.feedback==True:feedbackbugged('OUT')
#warning if the calculation is not completed
calculationcompleted = False
for line in ctrl.filedata:
if line == ' Calculation completed.':
calculationcompleted = True
if calculationcompleted == False:
print '\n- WARNING -\ncalculation not completed'
#compute number of datasets --> ctrl.ndataset
#compute first and last line of each dataset --> ctrl.datasetlocation
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i][:10] == '== DATASET':
ctrl.ndataset += 1
if ctrl.datasetlocation != list():
ctrl.datasetlocation[-1][4] = i
ctrl.datasetlocation.append(['DATASET %s' %ctrl.ndataset,'first line =',i+1,'last line =','not found'])
if ctrl.filedata[i][:17] == '== END DATASET(S)':
ctrl.datasetlocation[-1][4] = i
if ctrl.debugmode==True:
print '\n--> dataset locations computed'
for line in ctrl.datasetlocation:
print ' %s' %line
#compute list of datasets to use --> ctrl.useddataset
validanswer = False
allowedchars = ['-',',','0','1','2','3','4','5','6','7','8','9']
print '\n%s dataset(s) detected' %ctrl.ndataset
if ctrl.datasetlocation[-1][-1] == 'not found':
print 'the last dataset is not completed and will be ignored'
ctrl.ndataset -= 1
if ctrl.ndataset == 0:
print '\n- ERROR -\nno completed dataset available\n'
if feedback.feedback==True:feedbackcrashed('OUT','no completed dataset')
sys.exit()
elif ctrl.ndataset == 1:
ctrl.useddataset = [1]
elif ctrl.ndataset > 1:
while validanswer == False:
answer = raw_input('\nWhich dataset(s) do you want to use (1 to %s) ? ' %ctrl.ndataset)
ctrl.useddataset = list()
validanswer = True
#removes empty spaces from answer
answersplit = answer.split()
answer = str()
for splitted in answersplit:
answer+=splitted
#compute ctrl.useddataset
try:
S = answer.split(',')
for i in range(len(S)):
if '-' in S[i]:
a = int(S[i].split('-')[0])
b = int(S[i].split('-')[1])
ctrl.useddataset += range(a,b+1)
else:
ctrl.useddataset += [int(S[i])]
rmrepetitions(ctrl.useddataset)
ctrl.useddataset = sort(ctrl.useddataset)
except:
validanswer = False
#verify validity
for number in ctrl.useddataset:
if number < 1 or number > ctrl.ndataset:
validanswer = False
#show format instructions to user if invalid entry
if validanswer == False:
print '\n- invalid entry -'
print 'use commas to separate different datasets'
print 'you can use minus signs to specify a group of successive datasets'
print 'for example, if you want to use the datasets 1, 3, 4, 5, 6 and 8, type : 1,3-6,8'
if ctrl.debugmode==True:print '\n--> list of used datasets computed\n %s' %ctrl.useddataset
#get type of each atom --> data.typat
#(assuming only one occurence of "typat" is present in the .out file)
try:
flag_typat = False
k=0
for i in range(len(ctrl.filedata)):
if flag_typat==False and ctrl.filedata[i].split()[0] == 'typat':
flag_typat = True
k = i
data.typat = ctrl.filedata[k].split()[1:]
while ctrl.filedata[k+1].split()[0].isdigit()==True:
k+=1
for j in range(len(ctrl.filedata[k].split())):
data.typat.append(ctrl.filedata[k].split()[j])
for i in range(len(data.typat)):
data.typat[i] = int(data.typat[i])
except:
data.typat = '?'
if ctrl.debugmode==True:print '\n--> typat found\n %s' %data.typat
#compute number of valence bands --> ctrl.nvalenceband
#(assuming only one occurence of "- pspini" is present for each atom type)
try:
nion = list()
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i][:9] == '- pspini:':
nion.append(float(ctrl.filedata[i+3].split()[2]))
for i in range(len(data.typat)):
ctrl.nvalenceband += int(nion[data.typat[i]-1])
ctrl.nvalenceband = ctrl.nvalenceband/2.0
if ctrl.nvalenceband%1 == 0:
# ctrl.nvalenceband is an integer
ctrl.nvalenceband = int(ctrl.nvalenceband)
else:
# ctrl.nvalenceband is not an integer
ctrl.nvalenceband = int(ctrl.nvalenceband) + 1
except:
ctrl.nvalenceband = '?'
if ctrl.debugmode==True:print '\n--> number of valence bands computed\n %s' %ctrl.nvalenceband
#get fermi energy --> data.fermienergy
#(assuming only one occurence of "Fermi energy" is present in the .out file)
try:
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'Fermi':
data.fermienergy = float(ctrl.filedata[i].split()[-5])*hartree_to_eV
except:
pass
if data.fermienergy == float(0):
data.fermienergy = 'automatic'
if ctrl.debugmode==True:print '\n--> fermi energy found\n %s' %data.fermienergy
#------------------------------
parser_template = '{}\d?\s?=?\s*([\d.E+]+)'
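#Illustrative note (added comment, example strings are hypothetical): once formatted with a
#variable name, the pattern tolerates an optional dataset digit and an optional '=' sign,
#e.g. re.search(parser_template.format('ecut'), 'ecut2= 25.0').group(1) --> '25.0'
# re.search(parser_template.format('natom'), 'natom 8').group(1) --> '8'
#the captured group only admits digits, '.', 'E' and '+'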
#compute k-points and energy eigenvalues for each dataset
starter = ctrl.filedata[:ctrl.datasetlocation[0][2]-1]
for d in range(len(ctrl.useddataset)):
n = ctrl.useddataset[d] #number of the dataset
dataset = ctrl.filedata[ctrl.datasetlocation[n-1][2]-1:ctrl.datasetlocation[n-1][4]]
#compute the dataset key --> datasetkey
flag_nband = False
datasetkey = list()
#ecut
datasetkey.append(['ecut:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'ecut%s' %n or starter[i].split()[0] == 'ecut':
value = float(re.search(parser_template.format('ecut'), starter[i]).group(1))
datasetkey[-1].append(value)
if len(datasetkey[-1]) == 1:
datasetkey[-1].append('notfound')
#natom
datasetkey.append(['natom:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'natom%s' %n or starter[i].split()[0] == 'natom':
value = float(re.search(parser_template.format('natom'), starter[i]).group(1))
datasetkey[-1].append(value)
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(1)) #default
#nband
datasetkey.append(['nband:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'nband%s' %n or starter[i].split()[0] == 'nband':
value = float(re.search(parser_template.format('nband'), starter[i]).group(1))
datasetkey[-1].append(value)
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(1)) #default
#occopt
datasetkey.append(['occopt:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'occopt%s' %n or starter[i].split()[0] == 'occopt':
value = float(re.search(parser_template.format('occopt'), starter[i]).group(1))
datasetkey[-1].append(value)
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(1)) #default
#set fermi energy to "automatic" if occopt is non metallic
if datasetkey[-1][-1] in [0,1,2]:
data.fermienergy = 'automatic'
#toldfe
datasetkey.append(['toldfe:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'toldfe%s' %n or starter[i].split()[0] == 'toldfe':
datasetkey[-1].append(float(starter[i].split()[1]))
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(0)) #default
#toldff
datasetkey.append(['toldff:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'toldff%s' %n or starter[i].split()[0] == 'toldff':
datasetkey[-1].append(float(starter[i].split()[1]))
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(0)) #default
#tolvrs
datasetkey.append(['tolvrs:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'tolvrs%s' %n or starter[i].split()[0] == 'tolvrs':
datasetkey[-1].append(float(starter[i].split()[1]))
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(0)) #default
#tolwfr
datasetkey.append(['tolwfr:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'tolwfr%s' %n or starter[i].split()[0] == 'tolwfr':
datasetkey[-1].append(float(starter[i].split()[1]))
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(0)) #default
#typat
datasetkey.append(['typat:'])
for i in range(len(starter)):
if starter[i].split()[0] == 'typat%s' %n or starter[i].split()[0] == 'typat':
k = i
temp = list()
temp = starter[k].split()[1:]
while starter[k+1].split()[0].isdigit()==True:
k+=1
temp += starter[k].split()
for j in range(len(temp)):
temp[j] = int(temp[j])
datasetkey[-1]+=temp
if len(datasetkey[-1]) == 1:
datasetkey[-1].append(float(1)) #default
#reciprocalvectors
datasetkey.append(['reciprocalvectors:'])
for i in range(len(dataset)):
if dataset[i].split()[0] == 'R(1)=':
for j in range(3):
linesplit = dataset[i+j].split()
datasetkey[-1].append(float(linesplit[-3]))
datasetkey[-1].append(float(linesplit[-2]))
datasetkey[-1].append(float(linesplit[-1]))
if len(datasetkey[-1]) == 1:
datasetkey[-1].append('notfound') #default
#reduced coordinates
datasetkey.append(['reducedcoordinates:'])
for i in range(len(dataset)):
if dataset[i][:20] == ' reduced coordinates':
k = i
while len(dataset[k+1].split()) == 3:
datasetkey[-1].append(float(dataset[k+1].split()[0]))
datasetkey[-1].append(float(dataset[k+1].split()[1]))
datasetkey[-1].append(float(dataset[k+1].split()[2]))
k+=1
#verify the dataset key
if d == 0:
#compute database key --> ctrl.databasekey
ctrl.databasekey = datasetkey
refdataset = n
else:
if datasetkey != ctrl.databasekey:
print '\n- ERROR -\nDATASET %s is not compatible with DATASET %s' %(n,refdataset)
#print which parts of the dataset key differ
for i in range(len(datasetkey)):
if datasetkey[i] != ctrl.databasekey[i]:
print '%s are different' %datasetkey[i][0][:-1]
print ''
if feedback.feedback==True:feedbackcrashed('OUT','datasetkey not compatible')
sys.exit()
else:
pass
#get eigenvalue energy units
for line in dataset:
if line.split()[0] == 'Eigenvalues':
data.units = line.replace('(','').replace(')','').split()[1]
#get k-points --> data.kpt
#get energy eigenvalues --> data.energy
kptlist = list()
for i in range(len(dataset)):
if dataset[i].split()[0][:4] == 'kpt#':
linesplit = dataset[i].split()
kptlist.append(array([float(linesplit[-5]),float(linesplit[-4]),float(linesplit[-3])],dtype=float))
k=i+1
energylist = list()
while dataset[k].split()[0].replace('-','').replace('.','').replace('e','').replace('E','').isdigit():
linesplit = dataset[k].split()
for j in range(len(linesplit)):
energylist.append(float(linesplit[j]))
k+=1
if data.units == 'hartree':
data.energy.append(array(energylist)*ctrl.hartree_to_eV)
else:
data.energy.append(array(energylist))
data.kpt += kptlist
if ctrl.debugmode==True:
print '\n--> k-points found for DATASET %s\n {%s element(s)}' %(n,len(kptlist))
if ctrl.debugmode==True:
print '\n--> energy eigenvalues found for DATASET %s\n {%s element(s)} for each k-point' %(n,len(data.energy[0]))
#-------------------------------------
#compute special k-points --> data.specialkpt
rmsuccessiverepetitions(data.kpt,data.energy)
for i in range(len(data.kpt)):
if i == 0:
data.specialkpt.append(data.kpt[i])
elif i == 1:
vector2 = data.kpt[i]-data.kpt[i-1]
elif i == len(data.kpt)-1:
data.specialkpt.append(data.kpt[i])
else:
vector1 = data.kpt[i] - data.kpt[i-1]
if angle(vector1,vector2) < ctrl.angletol:
pass
else:
data.specialkpt.append(data.kpt[i-1])
vector2 = vector1
if ctrl.debugmode==True:
print '\n--> special k-points computed\n %s element(s)' %len(data.specialkpt)
for i in range(len(data.specialkpt)):
print ' %s' %data.specialkpt[i]
#compute band structure scheme --> ctrl.bandstructurescheme
L = 0
dico = dict()
for i in range(len(data.specialkpt)):
k = str(data.specialkpt[i])
if not k in dico.keys():
L+=1
dico[k] = '{l%s}' %ctrl.alphabet[L]
ctrl.bandstructurescheme += '-%s' %dico[k]
ctrl.bandstructurescheme = ctrl.bandstructurescheme[1:]
if ctrl.debugmode==True:print '\n--> band structure scheme computed\n %s' %ctrl.bandstructurescheme
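#Illustrative note (added comment): the scheme is a '-'-joined chain of tags, one tag per
#distinct special k-point, and repeated k-points reuse the same tag; a closed path such as
#G-X-M-G would therefore come out as something like '{la}-{lb}-{lc}-{la}' (exact letters
#depend on ctrl.alphabet). Whitespace inserted later in the .dbs scheme splits the chain
#and is rendered as an empty space between disconnected path segments (see the dbs branch).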
if ctrl.filetype == 'dbs':
if feedback.feedback==True:feedbackbugged('DBS')
#get graph title --> graph.title
for i in range (len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'GRAPH':
graph.title = ctrl.filedata[i+1]
while graph.title[0] == ' ':graph.title = graph.title[1:]
while graph.title[-1] == ' ':graph.title = graph.title[:-1]
#get reciprocal vectors --> data.G
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'reciprocalvectors:':
linesplit = ctrl.filedata[i].split()
data.G.append(array([float(linesplit[1]),float(linesplit[2]),float(linesplit[3])],dtype=float))
data.G.append(array([float(linesplit[4]),float(linesplit[5]),float(linesplit[6])],dtype=float))
data.G.append(array([float(linesplit[7]),float(linesplit[8]),float(linesplit[9])],dtype=float))
if ctrl.debugmode==True:
print '\n--> reciprocal vectors found'
for i in range(3):
print ' G(%s)= %s' %(i+1,data.G[i])
#get number of valence bands
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'NUMBER':
ctrl.nvalenceband = int(ctrl.filedata[i+1].split()[0])
#get fermi energy
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'FERMI':
data.fermienergy = ctrl.filedata[i+1].split()[0]
if ctrl.debugmode==True:print '\n--> fermi energy found\n %s' %data.fermienergy
#get special k-points --> ctrl.dicospecialkpt {caption:array}
for i in range(len(ctrl.filedata)):
linesplit = ctrl.filedata[i].split()
if '}=' in linesplit[0] and linesplit[0][0] == '{':
kptcaption = linesplit[0].split('=')[0]
ctrl.dicospecialkpt[kptcaption] = array([float(linesplit[1]),float(linesplit[2]),float(linesplit[3])],dtype=float)
if ctrl.debugmode==True:
print '\n--> special k-points found\n %s element(s)' %len(ctrl.dicospecialkpt)
for i in range(len(ctrl.dicospecialkpt)):
print ' %s : %s' %(ctrl.dicospecialkpt.keys()[i],ctrl.dicospecialkpt.values()[i])
#get band structure scheme --> ctrl.bandstructurescheme
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0][0] == '{' and ctrl.filedata[i].split()[-1][-1] == '}':
ctrl.bandstructurescheme = ctrl.filedata[i]
while ctrl.bandstructurescheme[0] != '{':ctrl.bandstructurescheme = ctrl.bandstructurescheme[1:]
while ctrl.bandstructurescheme[-1] != '}':ctrl.bandstructurescheme = ctrl.bandstructurescheme[:-1]
if ctrl.debugmode==True:print '\n--> band structure scheme found\n %s' %ctrl.bandstructurescheme
#get k-points --> data.kpt
#get energy eigenvalues --> data.energy
for i in range(len(ctrl.filedata)):
if ctrl.filedata[i].split()[0] == 'kpt':
linesplit = ctrl.filedata[i+1].split()
data.kpt.append(array([float(linesplit[0]),float(linesplit[1]),float(linesplit[2])],dtype=float))
energieslist = list()
for energy in ctrl.filedata[i+2].split():
energieslist.append(float(energy))
data.energy.append(array(energieslist))
if ctrl.debugmode==True:print '\n--> k-points found\n {%s element(s)}' %len(data.kpt)
if ctrl.debugmode==True:print '\n--> energy eigenvalues found\n {%s element(s)} for each k-point' %len(data.energy[0])
#compute segment caption list --> ctrl.segmentcaptionlist
for captiongroup in ctrl.bandstructurescheme.split():
captions = captiongroup.split('-')
for i in range(len(captions)-1):
ctrl.segmentcaptionlist.append([captions[i],captions[i+1]])
ctrl.segmentcaptionlist.append(['empty space'])
ctrl.segmentcaptionlist.pop(-1)
if ctrl.debugmode==True:
print '\n--> segment caption list computed\n %s element(s)' %len(ctrl.segmentcaptionlist)
for i in range(len(ctrl.segmentcaptionlist)):
print ' %s' %ctrl.segmentcaptionlist[i]
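#Illustrative note (added comment, hypothetical tags): a scheme like '{la}-{lb}-{lc} {ld}-{le}'
#gives [['{la}','{lb}'], ['{lb}','{lc}'], ['empty space'], ['{ld}','{le}']],
#i.e. one caption pair per plotted segment, with an 'empty space' marker wherever the
#scheme contained whitespace.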
#compute segment cartesian length --> ctrl.segmentcartlength
nvac = 0
nseg = 0
totallen = 0
segmentcartlength = list()
for caption in ctrl.segmentcaptionlist:
if caption[0] == 'empty space':
nvac+=1
segmentcartlength.append('empty space')
else:
nseg+=1
ki = kpt_red_to_cart(ctrl.dicospecialkpt[caption[0]],data.G)
kf = kpt_red_to_cart(ctrl.dicospecialkpt[caption[1]],data.G)
segmentcartlength.append(norm(kf-ki))
totallen += segmentcartlength[-1]
if nvac != 0:
spacelen = (float(ctrl.spacepercent)/100)*totallen/nvac/(1-float(ctrl.spacepercent)/100)
for i in range(len(segmentcartlength)):
if segmentcartlength[i] == 'empty space':
segmentcartlength[i]=spacelen
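#Illustrative note (added comment, hypothetical numbers): the formula above sizes each empty
#space so that, once added, the spaces occupy ctrl.spacepercent percent of the full x axis.
#For totallen=1.0, nvac=1 and spacepercent=20: spacelen = 0.2*1.0/1/(1-0.2) = 0.25,
#and 0.25/(1.0+0.25) is indeed 20% of the final axis length.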
ctrl.segmentcartlength = array(segmentcartlength)
if ctrl.debugmode==True:
print '\n--> segment cartesian length computed\n %s element(s)' %len(ctrl.segmentcartlength)
for i in range(len(ctrl.segmentcartlength)):
print ' %s' %ctrl.segmentcartlength[i]
#compute segment relative length --> ctrl.segmentrellength
totallen = sum(ctrl.segmentcartlength)
segmentrellength = list()
for length in ctrl.segmentcartlength:
segmentrellength.append(length/totallen)
ctrl.segmentrellength = array(segmentrellength)
if ctrl.debugmode==True:
print '\n--> segment relative length computed\n %s element(s)' %len(ctrl.segmentrellength)
for i in range(len(ctrl.segmentrellength)):
print ' %s' %ctrl.segmentrellength[i]
#compute positions of xticks --> ctrl.xtick
xtick = list()
for i in range(len(ctrl.segmentrellength)):
xtick.append(sum(ctrl.segmentrellength[:i+1]))
xtick.insert(0,float(0))
ctrl.xtick = array(xtick)
if ctrl.debugmode==True:
print '\n--> positions of xticks computed\n %s element(s)' %len(ctrl.xtick)
for i in range(len(ctrl.xtick)):
print ' %s' %ctrl.xtick[i]
#compute captions of xticks --> ctrl.captiontick
ctrl.captiontick = ctrl.bandstructurescheme.replace('-',' ').split()
for i in range(len(ctrl.captiontick)):
if ctrl.captiontick[i][1] == 'l':
ctrl.captiontick[i] = '"%s"' %ctrl.captiontick[i][2]
elif ctrl.captiontick[i][1] == 's':
ctrl.captiontick[i] = '"\\f{Symbol}%s"' %ctrl.captiontick[i][2]
if ctrl.debugmode==True:
print '\n--> captions of xticks computed\n %s element(s)' %len(ctrl.captiontick)
for i in range(len(ctrl.captiontick)):
print ' %s' %ctrl.captiontick[i]
#compute dictionary mapping x coordinate on graph with k-points --> ctrl.dicoxkpt {x,kpt} (type(x) = float, type(kpt)=array(3,Float))
for i in range(len(ctrl.segmentcaptionlist)):
caption = ctrl.segmentcaptionlist[i]
if caption == ['empty space']:
pass
else:
correct = 0
ki = ctrl.dicospecialkpt[caption[0]]
kf = ctrl.dicospecialkpt[caption[1]]
for k in range(len(data.kpt)):
kpt=data.kpt[k]
goodkpt=False
if list(kpt)==list(ki):
goodkpt = True
xfrac = 0
elif list(kpt)==list(kf):
goodkpt = True
xfrac=1
elif angle(kpt-ki,kf-ki) < ctrl.angletol2 and dot(kf-kpt,kf-ki) > 0.0:
goodkpt = True
xfrac=dot(kpt-ki,kf-ki)/norm(kf-ki)/norm(kf-ki)
if goodkpt == True:
correct+=1
ctrl.dicoxkpt[xfrac*ctrl.segmentrellength[i]+ctrl.xtick[i]] = kpt
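#Added comment: xfrac is the normalized projection of kpt onto the segment ki->kf,
#i.e. dot(kpt-ki,kf-ki)/|kf-ki|**2, so it runs from 0 at ki to 1 at kf; scaling it by the
#segment's relative length and adding the segment's left xtick gives the x coordinate
#of that k-point on the graph.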
#compute abscissa array --> ctrl.X
ctrl.X = sort(ctrl.dicoxkpt.keys())
#compute ordinate arrays --> ctrl.Y
xsort = sort(ctrl.dicoxkpt.keys())
for i in range(len(xsort)):
x = xsort[i]
k = ctrl.dicoxkpt[x]
for j in range(len(data.kpt)):
if list(data.kpt[j]) == list(k):
index = j
ctrl.Y.append(data.energy[index])
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#WRITE THE FILE
if ctrl.filetype == 'out':
#compute the .dbs filename --> ctrl.dbsfilename
ctrl.dbsfilename = '%s.dbs' %ctrl.filename
#open the writer
writer = open(ctrl.dbsfilename,"w")
#write the default graph title
writer.write('GRAPH TITLE:\nBand Structure from %s\n' %ctrl.filename)
#write number of valence bands
writer.write('\nNUMBER OF VALENCE BANDS:\n%s\n' %ctrl.nvalenceband)
#write the fermi energy
writer.write('\nFERMI ENERGY (eV):\n%s\n' %data.fermienergy)
#write the special kpts
rmrepetitions(data.specialkpt)
writer.write('\nSPECIAL K-POINTS (reduced coord):\n')
for i in range(len(data.specialkpt)):
k = data.specialkpt[i]
kx = '%1.4f' %k[0]
ky = '%1.4f' %k[1]
kz = '%1.4f' %k[2]
if k[0]>=0:kx = ' %1.4f' %k[0]
if k[1]>=0:ky = ' %1.4f' %k[1]
if k[2]>=0:kz = ' %1.4f' %k[2]
if i <= 25:
writer.write('{l%s}= %s %s %s\n' %(ctrl.alphabet[i+1],kx,ky,kz))
else:
writer.write('{l%s}= %s %s %s\n' %('x',kx,ky,kz))
#write the band structure scheme
writer.write('\nBAND STRUCTURE SCHEME:\n')
writer.write('%s\n' %ctrl.bandstructurescheme)
#write the kpts and energies
rmrepetitions(data.kpt,data.energy)
writer.write('\n\n\nDATABASE:\n')
for i in range(len(data.kpt)):
k = data.kpt[i]
kx = '%1.4f' %k[0]
ky = '%1.4f' %k[1]
kz = '%1.4f' %k[2]
if k[0]>=0:kx = ' %1.4f' %k[0]
if k[1]>=0:ky = ' %1.4f' %k[1]
if k[2]>=0:kz = ' %1.4f' %k[2]
writer.write('kpt %s\n%s %s %s\n' %(i+1,kx,ky,kz))
for e in data.energy[i]:
if e >=0:writer.write(' %e ' %e)
else:writer.write('%e ' %e)
writer.write('\n')
#write the database key
writer.write('\nDATABASE KEY:\n')
for i in range(len(ctrl.databasekey)):
for j in range(len(ctrl.databasekey[i])):
writer.write('%s ' %ctrl.databasekey[i][j])
writer.write('\n')
#close the writer
writer.close()
print '\n"%s" file created successfully\n' %ctrl.dbsfilename
if feedback.feedback==True:feedbackcompleted('OUT')
if ctrl.filetype == 'dbs':
#compute the .agr filename --> ctrl.agrfilename
ctrl.agrfilename = '%s.agr' %ctrl.filename[:-4]
#compute fermi energy value in eV --> ctrl.fermienergy
if data.fermienergy == 'automatic':
maxlist = list()
for i in range(len(ctrl.Y)):
maxlist.append(ctrl.Y[i][ctrl.nvalenceband-1])
data.fermienergy = max(maxlist)
else:
data.fermienergy = float(data.fermienergy)
#compute the energy gap --> ctrl.bandgap
maxHOMOlist = list()
minLUMOlist = list()
for i in range(len(ctrl.Y)):
maxHOMOlist.append(ctrl.Y[i][ctrl.nvalenceband-1])
minLUMOlist.append(ctrl.Y[i][ctrl.nvalenceband])
maxHOMO = max(maxHOMOlist)
minLUMO = min(minLUMOlist)
ctrl.bandgap = minLUMO - maxHOMO
#compute gap style --> ctrl.gapstyle
ctrl.gapstyle = '(indirect)' #default
maxHOMOindexlist = list()
minLUMOindexlist = list()
i = 0
while maxHOMO in maxHOMOlist:
maxHOMOindexlist.append(maxHOMOlist.index(maxHOMO)+i)
maxHOMOlist.pop(maxHOMOlist.index(maxHOMO))
i += 1
i = 0
while minLUMO in minLUMOlist:
minLUMOindexlist.append(minLUMOlist.index(minLUMO)+i)
minLUMOlist.pop(minLUMOlist.index(minLUMO))
i += 1
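#Added comment: the two loops above collect every k-point index at which the highest
#valence band reaches its maximum and every index at which the lowest conduction band
#reaches its minimum; the check below marks the gap as direct when some k-point index
#appears in both lists, i.e. when both band extrema occur at the same k-point.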
for M in maxHOMOindexlist:
if M in minLUMOindexlist:
ctrl.gapstyle = '(direct)'
#shift energies to bring the fermi energy to zero, if wanted
if ctrl.energyshift == 'yes':
for i in range(len(ctrl.Y)):
ctrl.Y[i] = ctrl.Y[i] - data.fermienergy
data.fermienergy = 0.0
#compute the plot world ymin --> graph.worldymin
minlist = list()
for array in ctrl.Y:
minlist.append(min(array))
graph.worldymin = min(minlist)
#compute the plot world ymax --> graph.worldymax
maxlist = list()
for array in ctrl.Y:
maxlist.append(max(array))
graph.worldymax = max(maxlist)
#adjust worldymin and worldymax
width = graph.worldymax - graph.worldymin
graph.worldymin = graph.worldymin - 0.05*width
graph.worldymax = graph.worldymax + 0.05*width
#open the writer
writer = open(ctrl.agrfilename,"w")
#write the file
writer.write('# file produced using %s version %s\n' %(program,version))
writer.write('\n')
writer.write('# version of xmgrace:\n')
writer.write('@ version 50114\n')
writer.write('\n')
writer.write('# graph title:\n')
writer.write('@ title "%s"\n' %graph.title)
writer.write('\n')
writer.write('# graph range:\n')
writer.write('@ world xmin 0\n')
writer.write('@ world xmax 1\n')
writer.write('@ world ymin %s\n' %graph.worldymin)
writer.write('@ world ymax %s\n' %graph.worldymax)
writer.write('\n')
writer.write('# X axis properties:\n')
writer.write('@ xaxis tick major size 0.0\n') #height of x tick lines
writer.write('@ xaxis tick spec type both\n') #use explicitly specified ('spec') tick positions and labels
writer.write('@ xaxis tick spec 15\n') #number of user-specified ticks reserved below
for i in range(len(ctrl.xtick)):
writer.write('@ xaxis tick major %s, %s\n' %(i,ctrl.xtick[i]))
writer.write('@ xaxis ticklabel %s, %s\n' %(i,ctrl.captiontick[i]))
if ctrl.bandgap > 0:
writer.write('@ xaxis label "E\sG %s\N = %s eV"\n' %(ctrl.gapstyle,ctrl.bandgap))
writer.write('\n')
writer.write('# Y axis properties:\n')
writer.write('@ yaxis label "Energy (eV)"\n')
writer.write('@ yaxis tick major 5\n')
writer.write('@ yaxis tick minor 1\n')
writer.write('@ yaxis tick place normal\n')
writer.write('\n')
writer.write('# alternate Y axis properties:\n')
writer.write('@ altyaxis on\n')
writer.write('@ altyaxis ticklabel on\n')
writer.write('@ altyaxis ticklabel place opposite\n')
writer.write('@ altyaxis ticklabel type spec\n')
writer.write('@ altyaxis tick type spec\n')
writer.write('@ altyaxis tick spec 1\n')
writer.write('@ altyaxis tick major 0, %s\n' %data.fermienergy)
writer.write('@ altyaxis ticklabel 0, "\\f{Symbol}e\\f{}\sF\N"\n') #epsilon fermi symbol
writer.write('\n')
writer.write('# frame properties:\n')
writer.write('@ frame linewidth %s\n' %ctrl.separatorlinewidth)
writer.write('\n')
s = 0
writer.write('# plot of energy bands:\n')
for i in range(len(ctrl.Y[0])):
if i+1 > ctrl.nvalenceband:
color = ctrl.xmgracecolor[ctrl.conbandcolor]
else:
color = ctrl.xmgracecolor[ctrl.valbandcolor]
writer.write('@ s%s line linewidth %s\n' %(s,ctrl.bandlinewidth))
writer.write('@ s%s line color %s\n' %(s,color))
s+=1
writer.write('@ TYPE xy\n')
for j in range(len(ctrl.X)):
writer.write(' %s \t %s\n' %(ctrl.X[j],ctrl.Y[j][i]))
writer.write(' &\n')
writer.write('\n')
writer.write('# plot of fermi energy line:\n')
writer.write('@ s%s linewidth %s\n' %(s,ctrl.fermilinewidth))
writer.write('@ s%s linestyle 2\n' %s)
writer.write('@ s%s line color 1\n' %s)
s+=1
writer.write('@ TYPE xy\n')
writer.write(' %s \t %s\n' %(0,data.fermienergy))
writer.write(' %s \t %s\n' %(1,data.fermienergy))
writer.write(' &\n')
writer.write('\n')
writer.write('# plot of empty spaces:\n')
for i in range(len(ctrl.segmentcaptionlist)):
if ctrl.segmentcaptionlist[i] == ['empty space']:
writer.write('@ s%s linewidth %s\n' %(s,ctrl.bandlinewidth))
writer.write('@ s%s line color 0\n' %s)
s+=1
xi = ctrl.xtick[i]
xf = ctrl.xtick[i+1]
xsort = list(sort(ctrl.dicoxkpt.keys()))
index = xsort.index(xi)
writer.write('@ TYPE xy\n')
writer.write(' %s \t %s\n' %(xi,data.fermienergy))
writer.write(' %s \t %s\n' %(xf,data.fermienergy))
writer.write(' &\n')
for j in range(len(ctrl.Y[0])):
writer.write('@ s%s linewidth %s\n' %(s,ctrl.bandlinewidth))
writer.write('@ s%s line color 0\n' %s)
s+=1
writer.write('@ TYPE xy\n')
writer.write(' %s \t %s\n' %(xi,ctrl.Y[index][j]))
writer.write(' %s \t %s\n' %(xf,ctrl.Y[index+1][j]))
writer.write(' &\n')
writer.write('\n')
writer.write('# plot of vertical separators:\n')
for i in range(len(ctrl.xtick)-2):
writer.write('@ s%s linewidth %s\n' %(s,ctrl.separatorlinewidth))
writer.write('@ s%s line color 1\n' %s)
s+=1
writer.write('@ TYPE xy\n')
writer.write(' %s \t %s\n' %(ctrl.xtick[i+1],graph.worldymin))
writer.write(' %s \t %s\n' %(ctrl.xtick[i+1],graph.worldymax))
writer.write(' &\n')
writer.write('\n')
#close the writer
writer.close()
print '\n"%s" file created successfully\n' %ctrl.agrfilename
if feedback.feedback==True:feedbackcompleted('DBS')
#=====================================================================================================================================================================
#=====================================================================================================================================================================
#AUTOLAUNCH
if ctrl.filetype == 'dbs' and ctrl.autolaunch == 'yes':
print 'launching xmgrace using command :\n> %s %s &\n' %(ctrl.launchcommand,ctrl.agrfilename)
os.system('%s %s &' %(ctrl.launchcommand,ctrl.agrfilename))
#=====================================================================================================================================================================
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------
| gpl-3.0 |
Akshay0724/scikit-learn | sklearn/model_selection/_split.py | 12 | 63090 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
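# Added illustrative comment: with e.g. n_samples=10 and n_splits=3 the two lines above
# give fold_sizes = [4, 3, 3]; the first n_samples % n_splits folds receive one extra
# sample, matching the Notes section of the class docstring.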
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
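# Added illustrative comment: e.g. group sizes [5, 3, 2, 2] with n_splits=2 are assigned
# greedily as 5 -> fold 0, 3 -> fold 1, 2 -> fold 1, 2 -> fold 0, giving per-fold totals
# of 7 and 5 samples.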
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size ``trunc(n_samples / n_splits)``, the last one has
the complementary.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
# So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
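# Added comment: each class gets its own KFold generator over a dummy array of length
# max(class_count, n_splits); zipping these per-class splits below assigns every sample
# of every class a test-fold index, so each fold keeps roughly the class proportions of y.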
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
    Provides train/test indices to split time series data samples
    that are observed at fixed time intervals.
    In each split, the test indices must be higher than the training
    indices, so shuffling in this cross-validator is inappropriate.
    This cross-validation object is a variation of :class:`KFold`.
    In the kth split, it returns the first k folds as the train set and the
    (k+1)th fold as the test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
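        # Any remainder (n_samples % n_folds) is absorbed by the first
        # training window, so every test block has exactly test_size samples.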
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned the same groups.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
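        # Each combination selects n_groups distinct groups; all samples
        # belonging to those groups form one test set.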
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
X, y, groups = indexable(X, y, groups)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
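            # When n_train + n_test < n_samples, the leftover samples are
            # excluded from both sets for this iteration.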
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws many
samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
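            # n_i[i] and t_i[i] hold how many samples of class i go to the
            # train and test side of this split, respectively.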
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is
    known only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check that the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
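# Illustrative note (added): for example, check_cv(5, y=[0, 1, 0, 1],
# classifier=True) yields a StratifiedKFold(n_splits=5, ...) splitter, while a
# plain check_cv(5) yields KFold(n_splits=5, ...); an iterable of
# (train, test) index pairs is wrapped in _CVIterableWrapper unchanged.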
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
    Quick utility that wraps input validation,
    ``next(ShuffleSplit().split(X, y))``, and application to the input data
    into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
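    # A single split is drawn from the chosen splitter and applied to every
    # input array, producing the flat (train, test) sequence returned below.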
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
mannickutd/learning_machine_learning | pytorch_feed_forward/feed_forward.py | 1 | 1692 | #!/usr/bin/env python
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torch.optim as onn
import torch.nn.functional as fnn
import torch.autograd as ann
class Net(nn.Module):
def __init__(self, n_inputs, n_hidden, n_outputs):
super(Net, self).__init__()
self.h_layer = nn.Linear(n_inputs, n_hidden)
self.o_layer = nn.Linear(n_hidden, n_outputs)
self.loss_function = nn.MSELoss()
self.optimizer = onn.Adam(self.parameters(), lr=0.001)
def forward(self, x):
h_output = fnn.sigmoid(self.h_layer(x))
o_output = fnn.sigmoid(self.o_layer(h_output))
return o_output
def initialize_network(n_inputs, n_hidden, n_outputs):
return Net(n_inputs, n_hidden, n_outputs)
def train_network(network, train, test, l_rate, n_epoch, n_outputs):
network.optimizer = onn.Adam(network.parameters(), lr=l_rate)
for epoch in range(n_epoch):
for i, x in enumerate(train):
network.optimizer.zero_grad()
input_tensor = ann.Variable(torch.Tensor([x]))
expected = [0 for i in range(n_outputs)]
expected[test[i]] = 1
expected_tensor = ann.Variable(torch.Tensor(expected))
output = network.forward(input_tensor)
loss = network.loss_function(output, expected_tensor)
loss.backward()
network.optimizer.step()
print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, loss.data[0]))
def predict(network, x):
network.eval()
input_tensor = ann.Variable(torch.Tensor([x]))
output = network.forward(input_tensor)
return np.argmax(output.data.numpy())
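# Added usage sketch (not part of the original module): a tiny end-to-end run
# on made-up XOR-style data, guarded so it only executes when this file is run
# directly. It assumes the older PyTorch API (Variable, loss.data[0]) that the
# functions above already rely on.
if __name__ == "__main__":
    net = initialize_network(2, 4, 2)
    train_data = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    labels = [0, 1, 1, 0]
    train_network(net, train_data, labels, l_rate=0.01, n_epoch=50, n_outputs=2)
    print([predict(net, x) for x in train_data])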
| apache-2.0 |
atavory/ibex | ibex/_function_transformer.py | 1 | 4260 | from __future__ import absolute_import
from six import string_types
import pandas as pd
from sklearn import base
from ._base import FrameMixin
from ._utils import verify_x_type, verify_y_type
__all__ = []
def _process_cols(cols):
if cols is None:
return None
return [cols] if isinstance(cols, string_types) else list(cols)
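# For illustration (added): _process_cols('a') -> ['a'],
# _process_cols(['a', 'b']) -> ['a', 'b'], and _process_cols(None) -> None.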
# Tmp Ami - add kw_args, inverse-transform support
class FunctionTransformer(base.BaseEstimator, base.TransformerMixin, FrameMixin):
"""
Transforms using a function.
Arguments:
func: One of:
* ``None``
* a callable
* a step
in_cols: One of:
* ``None``
* a string
* a list of strings
        out_cols: One of:
            * ``None``
            * a string
            * a list of strings
            giving the column names of the transformed output.
        pass_y: Boolean indicating whether to pass the ``y`` argument to
            the function or step given in ``func``.
        kw_args: Keyword arguments passed to ``func`` when it is a callable.
Returns:
An :py:class:`sklearn.preprocessing.FunctionTransformer` object.
"""
def __init__(self, func=None, in_cols=None, out_cols=None, pass_y=None, kw_args=None):
FrameMixin.__init__(self)
params = {
'func': func,
'in_cols': in_cols,
'out_cols': out_cols,
'pass_y': pass_y,
'kw_args': kw_args,
}
self.set_params(**params)
def fit(self, X, y=None):
"""
Fits the transformer using ``X`` (and possibly ``y``).
Returns:
``self``
"""
verify_x_type(X)
verify_y_type(y)
self.x_columns = X.columns
if self.in_cols is not None:
Xt = X[self.in_cols]
else:
Xt = X
if self.func is None:
return self
if isinstance(self.func, FrameMixin):
if self.pass_y:
self.func.fit(Xt, y)
else:
self.func.fit(Xt)
return self
def fit_transform(self, X, y=None):
"""
Fits the transformer using ``X`` (and possibly ``y``), and transforms, in one
step if possible
Returns:
Transformed data.
"""
verify_x_type(X)
verify_y_type(y)
if not isinstance(self.func, FrameMixin) or not hasattr(self.func, 'fit_transform'):
if self.pass_y:
return self.fit(X, y).transform(X, y)
return self.fit(X).transform(X)
self.x_columns = X.columns
Xt = X[self.x_columns]
in_cols = _process_cols(self.in_cols)
if in_cols is not None:
Xt = Xt[in_cols]
if self.func is None:
res = Xt
elif isinstance(self.func, FrameMixin):
if self.pass_y:
res = self.func.fit_transform(Xt, y)
else:
res = self.func.fit_transform(Xt)
else:
if self.kw_args is not None:
res = pd.DataFrame(self.func(Xt, **self.kw_args), index=Xt.index)
else:
res = pd.DataFrame(self.func(Xt), index=Xt.index)
return self.__process_res(Xt, res)
def transform(self, X, y=None):
"""
Returns:
Transformed data.
"""
verify_x_type(X)
verify_y_type(y)
Xt = X[self.x_columns]
in_cols = _process_cols(self.in_cols)
if in_cols is not None:
Xt = Xt[in_cols]
if self.func is None:
res = Xt
elif isinstance(self.func, FrameMixin):
if self.pass_y:
res = self.func.transform(Xt, y)
else:
res = self.func.transform(Xt)
else:
if self.kw_args is not None:
res = pd.DataFrame(self.func(Xt, **self.kw_args), index=Xt.index)
else:
res = pd.DataFrame(self.func(Xt), index=Xt.index)
return self.__process_res(Xt, res)
def __process_res(self, Xt, res):
in_cols = _process_cols(self.in_cols)
out_cols = _process_cols(self.out_cols)
if out_cols is not None:
res_cols = out_cols
elif in_cols is not None:
res_cols = in_cols
else:
res_cols = Xt.columns
res = res.copy()
res.columns = res_cols
return res
| bsd-3-clause |
Fireblend/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 126 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is bounded by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, 8x8 gray level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
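# Added illustration (not part of the original example): the helper imported
# above can also be queried for a single configuration; the exact integer may
# vary slightly across scikit-learn versions.
print("JL minimum n_components for 1000 samples at eps=0.1: %d"
      % johnson_lindenstrauss_min_dim(n_samples=1000, eps=0.1))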
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 47 | 3599 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighbors often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data with truncated SVD.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
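# X_transformed is a sparse indicator matrix: one column per leaf across all
# trees, with a 1 marking the leaf each sample falls into (added comment).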
# Visualize result after dimensionality reduction using truncated SVD
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
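# Added for illustration (not in the original example): training accuracy of
# both models; the exact values vary with the random synthetic data.
print("BernoulliNB on hashed data, train accuracy: %.2f"
      % nb.score(X_transformed, y))
print("ExtraTrees on original data, train accuracy: %.2f"
      % trees.score(X, y))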
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("Truncated SVD reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
Huyuwei/tvm | tutorials/frontend/from_onnx.py | 2 | 3917 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Compile ONNX Models
===================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_
This article is an introductory tutorial on deploying ONNX models with Relay.
To begin, the ONNX package must be installed.
A quick solution is to install the protobuf compiler, and
.. code-block:: bash
pip install onnx --user
or please refer to the official site:
https://github.com/onnx/onnx
"""
import onnx
import numpy as np
import tvm
import tvm.relay as relay
from tvm.contrib.download import download_testdata
######################################################################
# Load pretrained ONNX model
# ---------------------------------------------
# The example super resolution model used here is exactly the same model in onnx tutorial
# http://pytorch.org/tutorials/advanced/super_resolution_with_caffe2.html
# we skip the pytorch model construction part, and download the saved onnx model
model_url = ''.join(['https://gist.github.com/zhreshold/',
'bcda4716699ac97ea44f791c24310193/raw/',
'93672b029103648953c4e5ad3ac3aadf346a4cdc/',
'super_resolution_0.2.onnx'])
model_path = download_testdata(model_url, 'super_resolution.onnx', module='onnx')
# now you have super_resolution.onnx on disk
onnx_model = onnx.load(model_path)
######################################################################
# Load a test image
# ---------------------------------------------
# A single cat dominates the examples!
from PIL import Image
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_path = download_testdata(img_url, 'cat.png', module='data')
img = Image.open(img_path).resize((224, 224))
img_ycbcr = img.convert("YCbCr") # convert to YCbCr
img_y, img_cb, img_cr = img_ycbcr.split()
x = np.array(img_y)[np.newaxis, np.newaxis, :, :]
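# x now has shape (1, 1, 224, 224): batch, single Y channel, height, width.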
######################################################################
# Compile the model with relay
# ---------------------------------------------
target = 'llvm'
input_name = '1'
shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with relay.build_config(opt_level=1):
intrp = relay.build_module.create_executor('graph', mod, tvm.cpu(0), target)
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = 'float32'
tvm_output = intrp.evaluate()(tvm.nd.array(x.astype(dtype)), **params).asnumpy()
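# The network performs 3x super resolution on the Y channel, so tvm_output
# has shape (1, 1, 672, 672).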
######################################################################
# Display results
# ---------------------------------------------
# We put input and output image neck to neck
from matplotlib import pyplot as plt
out_y = Image.fromarray(np.uint8((tvm_output[0, 0]).clip(0, 255)), mode='L')
out_cb = img_cb.resize(out_y.size, Image.BICUBIC)
out_cr = img_cr.resize(out_y.size, Image.BICUBIC)
result = Image.merge('YCbCr', [out_y, out_cb, out_cr]).convert('RGB')
canvas = np.full((672, 672*2, 3), 255)
canvas[0:224, 0:224, :] = np.asarray(img)
canvas[:, 672:, :] = np.asarray(result)
plt.imshow(canvas.astype(np.uint8))
plt.show()
| apache-2.0 |
LevinJ/Supply-demand-forecasting | implement/svmregressionmodel.py | 1 | 1189 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from utility.sklearnbasemodel import BaseModel
import numpy as np
from sklearn.svm import SVR
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
class SVMRegressionModel(BaseModel):
def __init__(self):
BaseModel.__init__(self)
# self.usedFeatures = [1,4,5,6,7]
self.randomSate = None
# self.excludeZerosActual = True
# self.test_size = 0.3
return
def setClf(self):
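        # Note (added): min-max scaling the features to [0, 1] before the
        # RBF-kernel SVR keeps a single gamma value meaningful across
        # features with very different ranges.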
        clf = SVR(C=100, epsilon=0.1, gamma=0.0001, cache_size=10240)
min_max_scaler = preprocessing.MinMaxScaler()
self.clf = Pipeline([('scaler', min_max_scaler), ('estimator', clf)])
return
def getTunedParamterOptions(self):
# tuned_parameters = [{'estimator__C': [0.001,0.01,0.1, 1,10,100,1000, 10000],
# 'estimator__gamma':[0.00001, 0.0001, 0.001,0.003, 0.01,0.1, 1,10,100,1000,10000]}]
tuned_parameters = [{'estimator__C': [1,10],
'estimator__gamma':[0.00001]}]
return tuned_parameters
if __name__ == "__main__":
obj= SVMRegressionModel()
obj.run() | mit |
jmargeta/scikit-learn | sklearn/cluster/tests/test_spectral.py | 2 | 9100 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
from sklearn.metrics.pairwise import kernel_metrics
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances, adjusted_rand_score
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
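    # The affinity matrix above encodes two obvious blocks: samples 0-2 and
    # samples 3-6, which is the partition the assertions below expect.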
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_lobpcg_mode():
# Test the lobpcg mode of SpectralClustering
# We need a fairly big data matrix, as lobpcg does not work with
# small data matrices
centers = np.array([
[0., 0.],
[10., 10.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=.1, random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="lobpcg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    # We need a large matrix, or the lobpcg solver will fall back to its
    # non-sparse and buggy mode
S = np.array([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0],
[5, 1, 3, 2, 1, 0, 0, 0, 0, 0],
[2, 3, 1, 1, 1, 0, 0, 0, 0, 0],
[2, 2, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 2, 1, 1, 1],
[0, 0, 0, 0, 1, 2, 2, 3, 3, 2],
[0, 0, 0, 0, 2, 2, 3, 3, 3, 4],
[0, 0, 0, 0, 1, 3, 3, 1, 2, 4],
[0, 0, 0, 0, 1, 3, 3, 2, 1, 4],
[0, 0, 0, 0, 1, 2, 4, 4, 4, 1],
])
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
if labels[0] == 0:
labels = 1 - labels
assert_greater(np.mean(labels == [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]), .89)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=40, random_state=2,
centers=[[1, 1], [-1, -1]], cluster_std=0.4)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern, random_state=0)
labels = sp.fit(X).labels_
print(labels)
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
"""Histogram kernel implemented as a callable."""
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.todense()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 274 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
| bsd-3-clause |
sid88in/incubator-airflow | airflow/operators/generic_transfer.py | 13 | 3125 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
"""
    Moves data from one connection to another, assuming that they both
    provide the required methods in their respective hooks. The source hook
    needs to expose a `get_records` method, and the destination an
    `insert_rows` method.
This is meant to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database. (templated)
:type sql: str
:param destination_table: target table. (templated)
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
    :param destination_conn_id: destination connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data. (templated)
:type preoperator: str or list of str
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.log.info("Extracting data from %s", self.source_conn_id)
self.log.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.log.info("Running preoperator")
self.log.info(self.preoperator)
destination_hook.run(self.preoperator)
self.log.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
| apache-2.0 |
DistrictDataLabs/yellowbrick | yellowbrick/text/correlation.py | 1 | 12148 | # yellowbrick.text.correlation
# Implementation of word correlation for text visualization.
#
# Author: Patrick Deziel
# Created: Sun May 1 19:43:41 2022 -0600
#
# Copyright (C) 2022 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: correlation.py [b652fc9] [email protected] $
"""
Implementation of word correlation for text visualization.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from yellowbrick.style import find_text_color
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.palettes import color_sequence
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Word Correlation Plot Visualizer
##########################################################################
class WordCorrelationPlot(TextVisualizer):
"""
Word correlation illustrates the extent to which words in a corpus appear in the
same documents.
WordCorrelationPlot visualizes the binary correlation between words across
documents as a heatmap. The correlation is defined using the mean square
contingency coefficient (phi-coefficient) between any two words m and n. The
coefficient is a value between -1 and 1, inclusive. A value close to 1 or -1
indicates strong positive or negative correlation between m and n, while a value
close to 0 indicates little or no correlation. The constructor takes one required
argument, which is the list of words or n-grams to be plotted.
Parameters
----------
words : list of str
The list of words or n-grams to be plotted. The words must be present in the
provided corpus on fit().
ignore_case : bool, default: False
If True, all words will be converted to lowercase before processing.
ax : matplotlib Axes, default: None
The axes to plot the figure on.
cmap : str or cmap, default: "RdYlBu"
Colormap to use for the heatmap.
colorbar : bool, default: True
If True, a colorbar will be added to the heatmap.
fontsize : int, default: None
Font size to use for the labels on the axes.
kwargs : dict
Pass any additional keyword arguments to the super class.
Attributes
----------
self.doc_term_matrix_ : array of shape (n_docs, n_features)
The computed sparse document-term matrix containing binary values indicating if
a word is present in a document.
self.num_docs_ : int
The number of observed documents in the corpus.
self.vocab_ : dict
A dictionary mapping words to their indices in the document-term matrix.
self.num_features_ : int
The number of features (word labels) in the resulting plot.
self.correlation_matrix_ : ndarray of shape (n_features, n_features)
The computed matrix containing the phi-coefficients between all features.
"""
def __init__(
self,
words,
ignore_case=False,
ax=None,
cmap="RdYlBu",
colorbar=True,
fontsize=None,
**kwargs
):
super(WordCorrelationPlot, self).__init__(ax=ax, **kwargs)
# Visual parameters
self.fontsize = fontsize
self.colorbar = colorbar
self.cmap = color_sequence(cmap)
# Fitting parameters
self.ignore_case = ignore_case
self.words = self._construct_terms(words, ignore_case)
self.ngram_range = self._compute_ngram_range()
def _construct_terms(self, words, ignore_case):
"""
Constructs the list of terms to be plotted based on the provided words. This
performs input checking and removes duplicates to produce a list of valid words
for fitting.
"""
# Remove surrounding whitespace
terms = [word.strip() for word in words if len(word.strip()) > 0]
if len(terms) == 0:
raise YellowbrickValueError("Must provide at least one word to plot.")
# Convert to lowercase if ignore_case is set
if ignore_case:
terms = [word.lower() for word in terms]
# Sort and remove duplicates
return sorted(set(terms))
def _compute_ngram_range(self):
"""
Computes the n-gram range to use for vectorization based on the provided words.
This allows the user to specify multi-word terms for plotting.
"""
ngrams = [len(word.split()) for word in self.words]
return (min(ngrams), max(ngrams))
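    # For example, under the logic above, words=["data", "machine learning"]
    # would yield ngram_range (1, 2), so both unigrams and bigrams are counted.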
def _compute_coefficient(self, m, n):
"""
Computes the phi-coefficient for two words m and n, which is a correlation
value between -1 and 1 inclusive.
"""
m_col = self.doc_term_matrix_.getcol(self.vocab_[m])
n_col = self.doc_term_matrix_.getcol(self.vocab_[n])
both = m_col.multiply(n_col).sum()
m_total = m_col.sum()
n_total = n_col.sum()
only_m = m_total - both
only_n = n_total - both
neither = self.num_docs_ - both - only_m - only_n
        return ((both * neither) - (only_m * only_n)) / np.sqrt(
            m_total * n_total * (self.num_docs_ - m_total) * (self.num_docs_ - n_total)
        )
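    # Worked example of the phi-coefficient (illustrative numbers, not data from
    # this module): with num_docs_=10, both=4, only_m=1, only_n=2, neither=3,
    # m_total=5 and n_total=6, the coefficient is
    # (4*3 - 1*2) / sqrt(5 * 6 * 5 * 4) = 10 / sqrt(600) ≈ 0.408.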
def fit(self, X, y=None):
"""
The fit method is the primary drawing input for the word correlation
visualization.
Parameters
----------
X : list of str or generator
Should be provided as a list of strings or a generator yielding strings
that represent the documents in the corpus.
y : None
Labels are not used for the word correlation visualization.
Returns
-------
self: instance
Returns the instance of the transformer/visualizer.
Attributes
----------
self.doc_term_matrix_ : array of shape (n_docs, n_features)
The computed sparse document-term matrix containing binary values
indicating if a word is present in a document.
self.num_docs_ : int
The number of observed documents in the corpus.
self.vocab_ : dict
A dictionary mapping words to their indices in the document-term matrix.
self.num_features_ : int
The number of features (word labels) in the resulting plot.
self.correlation_matrix_ : ndarray of shape (n_features, n_features)
The computed matrix containing the phi-coefficients between all features.
"""
# Instantiate the CountVectorizer
vecs = CountVectorizer(
vocabulary=self.words,
lowercase=self.ignore_case,
ngram_range=self.ngram_range,
binary=True
)
# Get the binary document counts for the target words
self.doc_term_matrix_ = vecs.fit_transform(X)
self.num_docs_ = self.doc_term_matrix_.shape[0]
self.vocab_ = vecs.vocabulary_
# Verify that all target words exist in the corpus
for word in self.words:
if self.doc_term_matrix_.getcol(self.vocab_[word]).sum() == 0:
raise YellowbrickValueError("Word '{}' does not exist in the corpus.".format(word))
# Compute the phi-coefficient for each pair of words
self.num_features_ = len(self.words)
self.correlation_matrix_ = np.zeros((self.num_features_, self.num_features_))
for i, m in enumerate(self.words):
for j, n in enumerate(self.words):
self.correlation_matrix_[i, j] = self._compute_coefficient(m, n)
self.draw(X)
return self
def draw(self, X):
"""
        Called from the fit() method, this method draws the heatmap on the figure using
the computed correlation matrix.
"""
# Use correlation matrix data for the heatmap
wc_display = self.correlation_matrix_
# Set up the dimensions of the pcolormesh
X, Y = np.arange(self.num_features_ + 1), np.arange(self.num_features_ + 1)
self.ax.set_ylim(bottom=0, top=wc_display.shape[0])
self.ax.set_xlim(left=0, right=wc_display.shape[1])
# Set the words as the tick labels on the plot. The Y-axis is sorted from top
# to bottom, the X-axis is sorted from left to right.
xticklabels = self.words
yticklabels = self.words[::-1]
ticks = np.arange(self.num_features_) + 0.5
self.ax.set(xticks=ticks, yticks=ticks)
self.ax.set_xticklabels(xticklabels, rotation="vertical", fontsize=self.fontsize)
self.ax.set_yticklabels(yticklabels, fontsize=self.fontsize)
# Flip the Y-axis values so that they match the sorted labels
wc_display = np.flipud(wc_display)
# Draw the labels in each heatmap cell
for x in X[:-1]:
for y in Y[:-1]:
# Get the correlation value for the cell
value = wc_display[x, y]
svalue = "{:.2f}".format(value)
# Get a compatible text color for the cell
base_color = self.cmap(value / 2 + 0.5)
text_color = find_text_color(base_color)
# Draw the text at the center of the cell
# Note: x and y coordinates are swapped to match the pcolormesh
cx, cy = y + 0.5, x + 0.5
self.ax.text(cx, cy, svalue, va="center", ha="center", color=text_color, fontsize=self.fontsize)
# Draw the heatmap
g = self.ax.pcolormesh(X, Y, wc_display, cmap=self.cmap, vmin=-1, vmax=1)
# Add the color bar
if self.colorbar:
self.ax.figure.colorbar(g, ax=self.ax)
return self.ax
def finalize(self):
"""
Prepares the figure for rendering by adding the title. This method is usually
called from show() and not directly by the user.
"""
self.set_title("Word Correlation Plot")
self.fig.tight_layout()
##########################################################################
## Quick Method
##########################################################################
def word_correlation(
words,
corpus,
ignore_case=True,
ax=None,
cmap="RdYlBu",
show=True,
colorbar=True,
fontsize=None,
**kwargs
):
"""Word Correlation
Displays the binary correlation between the given words across the documents in a
corpus.
For a list of words with length n, this produces an n x n heatmap of correlation
values in the range [-1, 1].
Parameters
----------
words : list of str
The corpus words to display in the heatmap.
corpus : list of str or generator
The corpus as a list of documents or a generator yielding documents.
ignore_case : bool, default: True
        If True, all words will be converted to lowercase before processing.
ax : matplotlib axes, default: None
The axes to plot the figure on.
cmap : str, default: "RdYlBu"
Colormap to use for the heatmap.
show : bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however
you cannot call ``plt.savefig`` from this signature, nor
``clear_figure``. If False, simply calls ``finalize()``
colorbar : bool, default: True
If True, adds a colorbar to the figure.
fontsize : int, default: None
If not None, sets the font size of the labels.
"""
# Instantiate the visualizer
visualizer = WordCorrelationPlot(
words=words,
        ignore_case=ignore_case,
ax=ax,
cmap=cmap,
colorbar=colorbar,
fontsize=fontsize,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(corpus)
# Draw the final visualization
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer
return visualizer | apache-2.0 |
Jsonzhang/kaggle | digitRecognizer/digitRecognizerMLP.py | 1 | 1407 | import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
import theano as theano
from sklearn.neural_network import MLPClassifier
data = pd.read_csv('./data/train.csv', index_col=False)
test_data = pd.read_csv('./data/test.csv', index_col=False)
y = data['label']
X = data.drop('label', axis=1)
X_target = test_data
# .loc[:, ~(data == 0).all()]
newX = X.applymap(lambda x: 0 if x == 0 else 1)
X_target = X_target.applymap(lambda x: 0 if x == 0 else 1)
X_target.index += 1
X_train, X_test, y_train, y_test = train_test_split(newX, y, test_size=0.05, random_state=True)
cols = list(X_train)
nunique = X_train.apply(pd.Series.nunique)
cols_to_drop = nunique[nunique == 1].index
X_train = X_train.drop(cols_to_drop, axis=1)
X_test = X_test.drop(cols_to_drop, axis=1)
X_target = X_target.drop(cols_to_drop, axis=1)
# logic = GridSearchCV(estimator=RandomForestClassifier(), param_grid=forrest_params, cv=5)
# logic = LogisticRegression()
logic = MLPClassifier()
logic.fit(X_train, y_train)
result = logic.predict(X_target)
submission = pd.DataFrame({
"ImageId": X_target.index,
"Label": result
})
submission.to_csv('./submission.csv', index=False)
print(logic.score(X_train, y_train))
print(logic.score(X_test, y_test))
"""
0.929097744361
0.906666666667
"""
| mit |
Akshay0724/scikit-learn | examples/model_selection/plot_validation_curve.py | 135 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | sklearn/ensemble/__init__.py | 216 | 1307 | """
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
| bsd-3-clause |
thientu/scikit-learn | sklearn/setup.py | 224 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
MaestroGraph/sparse-hyper | scripts/generate.mnist.py | 1 | 5199 | import torch
import torchvision
from torchvision.transforms import ToTensor
from _context import sparse
from sparse import util
from tqdm import tqdm
import numpy as np
import random, sys
from PIL import Image
from argparse import ArgumentParser
from math import ceil
from collections import Counter
"""
Generate rotated and scaled versions of MNIST
"""
def paste(background, foreground, scale=[1.5, 2.0]):
rh, rw = background.size
sch, scw = scale
new_size = (int(ceil(foreground.size[0] * sch)), int(ceil(foreground.size[1] * scw)))
try:
foreground = foreground.resize(new_size, resample=Image.BICUBIC)
except:
print(f'pasting with size {new_size} failed. ({(sch, scw)})')
sys.exit()
# Rotate the foreground
angle_degrees = random.randint(0, 359)
foreground = foreground.rotate(angle_degrees, resample=Image.BICUBIC, expand=True)
h, w = foreground.size
h, w = rh - h, rw - w
h, w = random.randint(0, h), random.randint(0, w)
background.paste(foreground, box=(h, w), mask=foreground)
def make_image(b, images, res=100, noise=10, scale=[1.0, 2.0], aspect=2.0):
"""
Extract the b-th image from the batch of images, and place it into a 100x100 image, rotated and scaled
with noise extracted from other images.
:param b:
:param images:
:param res:
:return:
"""
# Sample a uniform scale for the noise and target
sr = scale[1] - scale[0]
ar = aspect - 1.0
sc = random.random() * sr + scale[0]
ap = random.random() * ar + 1.0
sch, scw = sc, sc
if random.choice([True, False]):
sch *= ap
else:
scw *= ap
background = Image.new(mode='RGB', size=(res, res))
# generate random patch size
nm = 14
nh, nw = random.randint(4, nm), random.randint(4, nm)
# Paste noise
for i in range(noise):
# select another image
ind = random.randint(0, images.size(0)-2)
if ind == b:
ind += 1
# clip out a random nh x nw patch
h, w = random.randint(0, 28-nh), random.randint(0, 28-nw)
        nump = (images[ind, 0, h:h+nh, w:w+nw].numpy() * 255).astype('uint8').squeeze()
patch = Image.fromarray(nump)
paste(background, patch, scale=(sch, scw))
# Paste image
nump = (images[b, 0, :, :].numpy() * 255).astype('uint8').squeeze()
foreground = Image.fromarray(nump)
paste(background, foreground, scale=(sch, scw))
return background
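# Minimal call sketch (for illustration only; `images` is assumed to be a batch
# tensor of shape [batch, 1, 28, 28] as produced by the MNIST loaders below):
#
#   img = make_image(0, images, res=100, noise=10, scale=[1.0, 2.0])
#   img.save('example.png')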
def go(arg):
# make directories
for i in range(10):
util.makedirs('./mnist-rsc/train/{}/'.format(i))
util.makedirs('./mnist-rsc/test/{}/'.format(i))
train = torchvision.datasets.MNIST(root=arg.data, train=True, download=True, transform=ToTensor())
trainloader = torch.utils.data.DataLoader(train, batch_size=arg.batch, shuffle=True, num_workers=2)
test = torchvision.datasets.MNIST(root=arg.data, train=False, download=True, transform=ToTensor())
testloader = torch.utils.data.DataLoader(test, batch_size=arg.batch, shuffle=True, num_workers=2)
indices = Counter()
for images, labels in tqdm(trainloader):
batch_size = labels.size(0)
for b in range(batch_size):
image = make_image(b, images, res=arg.res, noise=arg.noise, scale=arg.scale)
label = int(labels[b].item())
image.save('./mnist-rsc/train/{}/{:06}.png'.format(label, indices[label]))
indices[label] += 1
indices = Counter()
for images, labels in tqdm(testloader):
batch_size = labels.size(0)
for b in range(batch_size):
image = make_image(b, images, res=arg.res, noise=arg.noise, scale=arg.scale)
label = int(labels[b].item())
image.save('./mnist-rsc/test/{}/{:06}.png'.format(label, indices[label]))
indices[label] += 1
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-D", "--data", dest="data",
help="Data directory",
default='./data/')
parser.add_argument("-b", "--batch-size",
dest="batch",
help="The batch size.",
default=256, type=int)
parser.add_argument("-r", "--resolution",
dest="res",
help="Resolution (one side, images are always square).",
default=100, type=int)
parser.add_argument("-n", "--noise",
dest="noise",
help="Number of noise patches to add.",
default=10, type=int)
parser.add_argument("-s", "--scale",
dest="scale",
help="Min/max scale multiplier.",
nargs=2,
default=[1.0, 2.0], type=float)
parser.add_argument("-a", "--aspect",
dest="aspect",
help="Min/max aspect multiplier.",
default=2.0, type=float)
options = parser.parse_args()
print('OPTIONS ', options)
go(options) | mit |
scr4t/rep | rep/estimators/neurolab.py | 3 | 12768 | """
These classes are wrappers for `neurolab <https://pythonhosted.org/neurolab/lib.html>`_ - neural network python library
.. warning:: To make neurolab reproducible we change global random seed
::
numpy.random.seed(42)
"""
# Copyright 2014-2015 Yandex LLC and contributors <https://yandex.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
from abc import ABCMeta
from copy import deepcopy
import neurolab as nl
import numpy
import scipy
from .interface import Classifier, Regressor
from .utils import check_inputs, check_scaler, one_hot_transform, remove_first_line
__author__ = 'Vlad Sterzhanov, Alex Rogozhnikov, Tatiana Likhomanenko'
__all__ = ['NeurolabBase', 'NeurolabClassifier', 'NeurolabRegressor']
NET_TYPES = {'feed-forward': nl.net.newff,
'single-layer': nl.net.newp,
'competing-layer': nl.net.newc,
'learning-vector': nl.net.newlvq,
'elman-recurrent': nl.net.newelm,
'hemming-recurrent': nl.net.newhem,
'hopfield-recurrent': nl.net.newhop}
NET_PARAMS = ('minmax', 'cn', 'layers', 'transf', 'target',
'max_init', 'max_iter', 'delta', 'cn0', 'pc')
BASIC_PARAMS = ('layers', 'net_type', 'trainf', 'initf', 'scaler', 'random_state')
# Instead of single layer use feed-forward.
CANT_CLASSIFY = ('hopfield-recurrent', 'competing-layer', 'hemming-recurrent', 'single-layer')
class NeurolabBase(object):
"""Base class for estimators from Neurolab library.
Parameters:
-----------
:param features: features used in training
:type features: list[str] or None
:param list[int] layers: sequence, number of units inside each **hidden** layer.
:param string net_type: type of network
One of 'feed-forward', 'single-layer', 'competing-layer', 'learning-vector',
'elman-recurrent', 'hopfield-recurrent', 'hemming-recurrent'
:param initf: layer initializers
:type initf: anything implementing call(layer), e.g. nl.init.* or list[nl.init.*] of shape [n_layers]
:param trainf: net train function, default value depends on type of network
:param scaler: transformer to apply to the input objects
:type scaler: str or sklearn-like transformer or False (do not scale features)
:param random_state: ignored, added for uniformity.
:param dict kwargs: additional arguments to net __init__, varies with different net_types
.. seealso:: https://pythonhosted.org/neurolab/lib.html for supported train functions and their parameters.
"""
__metaclass__ = ABCMeta
def __init__(self,
features=None,
layers=(10,),
net_type='feed-forward',
initf=nl.init.init_rand,
trainf=None,
scaler='standard',
random_state=None,
**other_params):
self.features = list(features) if features is not None else features
self.layers = list(layers)
self.trainf = trainf
self.initf = initf
self.net_type = net_type
self.scaler = scaler
self.random_state = random_state
self.net = None
self.train_params = {}
self.net_params = {}
self.set_params(**other_params)
def is_fitted(self):
"""
Check if net is fitted
:return: If estimator was fitted
:rtype: bool
"""
return self.net is not None
def set_params(self, **params):
"""
Set the parameters of this estimator.
:param dict params: parameters to set in model
"""
for name, value in params.items():
if name.startswith("scaler__"):
assert hasattr(self.scaler, 'set_params'), \
"Trying to set {} without scaler".format(name)
self.scaler.set_params(**{name[len("scaler__"):]: value})
elif name.startswith('layers__'):
index = int(name[len('layers__'):])
self.layers[index] = value
elif name.startswith('initf__'):
index = int(name[len('initf__'):])
self.initf[index] = value
elif name in NET_PARAMS:
self.net_params[name] = value
elif name in BASIC_PARAMS:
setattr(self, name, value)
else:
self.train_params[name] = value
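    # Example of the prefix convention handled above (values are illustrative):
    #
    #   est.set_params(layers__0=20,             # resize the first hidden layer
    #                  scaler__with_mean=False,  # forwarded to the scaler
    #                  epochs=50)                # anything else -> train_params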
def get_params(self, deep=True):
"""
Get parameters of this estimator
:rtype: dict
"""
parameters = deepcopy(self.net_params)
parameters.update(deepcopy(self.train_params))
for name in BASIC_PARAMS:
parameters[name] = getattr(self, name)
return parameters
def _partial_fit(self, X, y_original, y_train):
"""
y_train is always 2-dimensional (one-hot for classification)
y_original is what originally was passed to `fit`.
"""
# magic reproducibilizer
numpy.random.seed(42)
if self.is_fitted():
x_train = self._transform_input(X, y_original, fit=False)
else:
x_train = self._transform_input(X, y_original, fit=True)
# Prepare parameters depending on network purpose (classification / regression)
net_params = self._prepare_params(self.net_params, x_train, y_train)
initializer = self._get_initializer(self.net_type)
net = initializer(**net_params)
# To allow similar initf function on all layers
initf_iterable = self.initf if hasattr(self.initf, '__iter__') else [self.initf] * len(net.layers)
for layer, init_function in zip(net.layers, initf_iterable):
layer.initf = init_function
net.init()
if self.trainf is not None:
net.trainf = self.trainf
self.net = net
self.net.train(x_train, y_train, **self.train_params)
return self
def _sim(self, X):
assert self.net is not None, 'Classifier not fitted, prediction denied'
transformed_x = self._transform_input(X, fit=False)
return self.net.sim(transformed_x)
def _transform_input(self, X, y=None, fit=True):
X = self._get_features(X)
# The following line fights the bug in sklearn < 0.16,
# most of transformers there modify X if it is pandas.DataFrame.
X = numpy.copy(X)
if fit:
self.scaler = check_scaler(self.scaler)
self.scaler.fit(X, y)
X = self.scaler.transform(X)
# HACK: neurolab requires all features (even those of predicted objects) to be in [min, max]
# so this dark magic appeared, seems to work ok for most reasonable use-cases,
# while allowing arbitrary inputs.
return scipy.special.expit(X / 3)
def _prepare_params(self, net_params, x_train, y_train):
net_params = deepcopy(net_params)
# Network expects features to be [0, 1]-scaled
net_params['minmax'] = [[0, 1]] * (x_train.shape[1])
# To unify the layer-description argument with other supported networks
        if 'size' not in net_params:
            net_params['size'] = self.layers
        else:
            if self.layers != [10]:
                raise ValueError('For neurolab please use either `layers` or `size`, not both')
# Set output layer size
net_params['size'] = list(net_params['size']) + [y_train.shape[1]]
# Default parameters for transfer functions in classifier networks
if 'transf' not in net_params:
net_params['transf'] = [nl.trans.TanSig()] * len(net_params['size'])
if not hasattr(net_params['transf'], '__iter__'):
net_params['transf'] = [net_params['transf']] * len(net_params['size'])
net_params['transf'] = list(net_params['transf'])
return net_params
@staticmethod
def _get_initializer(net_type):
if net_type not in NET_TYPES:
raise AttributeError("Got unexpected network type: '{}'".format(net_type))
return NET_TYPES.get(net_type)
class NeurolabClassifier(NeurolabBase, Classifier):
__doc__ = "Classifier from neurolab library. \n" + remove_first_line(NeurolabBase.__doc__)
def fit(self, X, y):
"""
Train the classifier
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
# erasing results of previous training
self.net = None
return self.partial_fit(X, y)
    def partial_fit(self, X, y):
"""
Additional training of the classifier
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:return: self
"""
assert self.net_type not in CANT_CLASSIFY, 'Network type does not support classification'
X, y, _ = check_inputs(X, y, None)
if not self.is_fitted():
self._set_classes(y)
y_train = one_hot_transform(y, n_classes=len(self.classes_)) * 0.98 + 0.01
return self._partial_fit(X, y, y_train)
def predict_proba(self, X):
"""
Predict labels for all events in dataset
:param X: pandas.DataFrame of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples] with integer labels
"""
return self._sim(X)
def staged_predict_proba(self, X):
"""
.. warning:: not supported in Neurolab (**AttributeError** will be thrown)
"""
raise AttributeError("staged_predict_proba is not supported by Neurolab networks")
def _prepare_params(self, params, x_train, y_train):
net_params = super(NeurolabClassifier, self)._prepare_params(params, x_train, y_train)
# Classification networks should have SoftMax as the transfer function on output layer
net_params['transf'][-1] = nl.trans.SoftMax()
return net_params
class NeurolabRegressor(NeurolabBase, Regressor):
__doc__ = "Regressor from neurolab library. \n" + remove_first_line(NeurolabBase.__doc__)
def fit(self, X, y):
"""
Train the classifier
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:param sample_weight: weight of events,
array-like of shape [n_samples] or None if all weights are equal
:return: self
"""
# erasing results of previous training
self.net = None
return self.partial_fit(X, y)
    def partial_fit(self, X, y):
"""
Additional training of the classifier
:param pandas.DataFrame X: data shape [n_samples, n_features]
:param y: labels of events - array-like of shape [n_samples]
:return: self
"""
assert self.net_type not in CANT_CLASSIFY, 'Network type does not support regression'
X, y, _ = check_inputs(X, y, None, allow_multiple_targets=True)
y_train = y.reshape(len(y), 1 if len(y.shape) == 1 else y.shape[1])
return self._partial_fit(X, y, y_train)
def predict(self, X):
"""
Predict values for all events in dataset.
:param X: pandas.DataFrame of shape [n_samples, n_features]
:rtype: numpy.array of shape [n_samples] with predicted values
"""
modeled = self._sim(X)
return modeled if modeled.shape[1] != 1 else numpy.ravel(modeled)
def staged_predict(self, X, step=10):
"""
.. warning:: not supported in Neurolab (**AttributeError** will be thrown)
"""
raise AttributeError("Staged predict is not supported by Neurolab networks")
def _prepare_params(self, params, x_train, y_train):
net_params = super(NeurolabRegressor, self)._prepare_params(params, x_train, y_train)
net_params['transf'][-1] = nl.trans.PureLin()
return net_params
| apache-2.0 |
DistrictDataLabs/yellowbrick | yellowbrick/features/base.py | 1 | 19690 | # yellowbrick.features.base
# Base classes for feature visualizers and feature selection tools.
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Created: Fri Oct 07 13:41:24 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: base.py [2e898a6] [email protected] $
"""
Base classes for feature visualizers and feature selection tools.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib as mpl
from yellowbrick.base import Visualizer
from yellowbrick.utils import is_dataframe
from yellowbrick.style import resolve_colors
from yellowbrick.exceptions import NotFitted
from yellowbrick.utils.target import target_color_type, TargetType
from yellowbrick.exceptions import YellowbrickKeyError, YellowbrickValueError
from yellowbrick.style import palettes
from matplotlib.colors import Normalize
from sklearn.base import TransformerMixin
##########################################################################
## Feature Visualizers
##########################################################################
class FeatureVisualizer(Visualizer, TransformerMixin):
"""Base class for feature visualization.
Feature engineering is primarily conceptualized as a transformation or
extraction operation, e.g. some raw data is passed through a series of
transformers and mappings to result in some final dataset which can be
directly fitted to a model. Therefore feature visualizers are
transformers and support the sklearn transformer interface by implementing
a transform method.
Subclasses of the FeatureVisualizer may call draw either from fit or from
transform but must implement both so that they can be supported in pipeline
objects. By default, the transform method of the visualizer is just a data
pass through that ensures the visualizer can be placed into a feature
extraction workflow.
Parameters
----------
ax : matplotlib.Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
fig : matplotlib Figure, default: None
The figure to plot the Visualizer on. If None is passed in the current
plot will be used (or generated if required).
kwargs : dict
Any additional keyword arguments to pass to the base Visualizer.
"""
def __init__(self, ax=None, fig=None, **kwargs):
super(FeatureVisualizer, self).__init__(ax=ax, fig=fig, **kwargs)
def transform(self, X, y=None):
"""
A pass-through to ensure that feature visualizers work in Pipelines.
Subclasses may override this method to actually transform data or to
call drawing methods. The transformer may also take an optional y
argument if it is required for either transformation or drawing.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature dataset to be transformed.
y : array-like, shape (n_samples,)
Dependent target data associated with X, unused.
Returns
-------
X : array-like, shape (n_samples, n_features)
Returns the original dataset, unmodified.
"""
return X
def fit_transform_show(self, X, y=None, **kwargs):
"""Fit, transform, then visualize data in one step.
A helper method similar to ``fit_transform`` that allows you to fit,
transform, and create a visualization in one simple step. Returns a
transformed dataset similar to ``fit_transform``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature dataset for both training and transformation.
y : array-like, shape (n_samples,)
Dependent target dataset optionally used for training.
kwargs : dict, optional
Keyword arguments to pass to the ``show()`` method.
Returns
-------
Xp : array-like, shape (m_samples, m_features)
The transformed dataset X prime.
"""
Xp = self.fit_transform(X, y)
self.show(**kwargs)
return Xp
class MultiFeatureVisualizer(FeatureVisualizer):
"""Direct visualization of a feature set.
    MultiFeatureVisualizers visualize several features at once, usually in order
to compare the effectiveness of a subset of features to the superset. This
type of visualizer provides base functionality for identifying the names of
the features either directly from the data or from user supplied values. It
also provides other functionality for feature selection, e.g. ensuring that
a subset of features is used if specified by the user.
Parameters
----------
ax : matplotlib.Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
fig : matplotlib Figure, default: None
The figure to plot the Visualizer on. If None is passed in the current
plot will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
        The length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
kwargs : dict, optional
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
"""
def __init__(self, ax=None, fig=None, features=None, **kwargs):
super(MultiFeatureVisualizer, self).__init__(ax=ax, fig=fig, **kwargs)
# Data Parameters
self.features = features
def fit(self, X, y=None):
"""
This method performs preliminary computations in order to set up the
figure or perform other analyses. It can also call drawing methods in
order to set up various non-instance related figure elements.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature dataset to be transformed.
y : array-like, shape (n_samples,)
Optional dependent target data associated with X.
Returns
-------
self : MultiFeatureVisualizer
Returns the visualizer/transformer for use in Pipelines and chaining.
"""
n_columns = X.shape[1]
if self.features is not None:
# Use the user-specified features with some checking
# TODO: allow the user specified features to filter the dataset
if len(self.features) != n_columns:
raise YellowbrickValueError(
(
"number of supplied feature names does not match the number "
"of columns in the training data."
)
)
self.features_ = np.array(self.features)
else:
# Attempt to determine the feature names from the input data
if is_dataframe(X):
self.features_ = np.array(X.columns)
# Otherwise create numeric labels for each column.
else:
self.features_ = np.arange(0, n_columns)
# Ensure super is called and fit is returned
super(MultiFeatureVisualizer, self).fit(X, y)
return self
##########################################################################
## Data Visualizers
##########################################################################
class DataVisualizer(MultiFeatureVisualizer):
"""Visualizations of instances in feature space.
Data Visualizers plot instances in feature space (sometimes also referred
to as data space). Feature space is a multi-dimensional space defined by
the columns of the dataset ``X`` when passed to ``fit()`` and ``transform``.
These instances and their features are directly plotted in a representation
of the higher dimensional space.
    Instances can also be labeled by a target vector, ``y``. The target is
visualized in data space by color. For example a discrete target for
classification problems will use categorical colors and a legend. A
continuous target for regression problems will use sequential colors with
a colormap.
This class provides helper functionality related to target identification:
whether or not the target is sequential or categorical, and mapping a
color sequence or color set to the targets as appropriate. It also
determines the scope of the target, e.g. the unique classes or the range
of the dataset for use in specific visualizations.
Parameters
----------
ax : matplotlib.Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
fig : matplotlib Figure, default: None
The figure to plot the Visualizer on. If None is passed in the current
plot will be used (or generated if required).
features : list, default: None
The names of the features specified by the columns of the input dataset.
        The length of this list must match the number of columns in X, otherwise
an exception will be raised on ``fit()``.
classes : list, default: None
The class labels for each class in y, ordered by sorted class index. These
names act as a label encoder for the legend, identifying integer classes
or renaming string labels. If omitted, the class labels will be taken from
the unique values in y.
Note that the length of this list must match the number of unique values in
y, otherwise an exception is raised. This parameter is only used in the
discrete target type case and is ignored otherwise.
colors : list or tuple, default: None
A single color to plot all instances as or a list of colors to color each
instance according to its class in the discrete case or as an ordered
colormap in the sequential case. If not enough colors per class are
specified then the colors are treated as a cycle.
colormap : string or cmap, default: None
The colormap used to create the individual colors. In the discrete case
it is used to compute the number of colors needed for each class and
in the continuous case it is used to create a sequential color map based
on the range of the target.
target_type : str, default: "auto"
Specify the type of target as either "discrete" (classes) or "continuous"
(real numbers, usually for regression). If "auto", then it will
attempt to determine the type by counting the number of unique values.
If the target is discrete, the colors are returned as a dict with classes
being the keys. If continuous the colors will be list having value of
color for each point. In either case, if no target is specified, then
color will be specified as the first color in the color cycle.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
features_ : ndarray, shape (n_features,)
The names of the features discovered or used in the visualizer that
can be used as an index to access or modify data in X. If a user passes
feature names in, those features are used. Otherwise the columns of a
DataFrame are used or just simply the indices of the data array.
classes_ : ndarray, shape (n_classes,)
The class labels that define the discrete values in the target. Only
available if the target type is discrete. This is guaranteed to be
strings even if the classes are a different type.
range_ : (min y, max y)
A tuple that describes the minimum and maximum values in the target.
Only available if the target type is continuous.
"""
def __init__(
self,
ax=None,
fig=None,
features=None,
classes=None,
colors=None,
colormap=None,
target_type="auto",
**kwargs
):
super(DataVisualizer, self).__init__(
ax=ax, fig=fig, features=features, **kwargs
)
# Validate raises YellowbrickValueError if invalid
TargetType.validate(target_type)
# Data Parameters
self.classes = classes
self.target_type = target_type
# Visual Parameters
self.colors = colors
self.colormap = colormap
# Internal attributes
self._colors = None
self._target_color_type = None
self._label_encoder = None
def fit(self, X, y=None):
"""
Fits the visualizer to the training data set by determining the
target type, colors, classes, and range of the data to ensure that
the visualizer can accurately portray the instances in data space.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
self : DataVisualizer
Returns the instance of the transformer/visualizer
"""
# Compute the features from the data
super(DataVisualizer, self).fit(X, y)
# Determine the target color type
self._determine_target_color_type(y)
# Handle the single target color type
if self._target_color_type == TargetType.SINGLE:
# use the user supplied color or the first color in the color cycle
self._colors = self.colors or "C0"
# Compute classes and colors if target type is discrete
elif self._target_color_type == TargetType.DISCRETE:
# Unique labels are used both for validation and color mapping
labels = np.unique(y)
# Handle user supplied classes
if self.classes is not None:
self.classes_ = np.asarray([str(c) for c in self.classes])
# Validate user supplied class labels
if len(self.classes_) != len(labels):
raise YellowbrickValueError(
(
"number of specified classes does not match "
"number of unique values in target"
)
)
# Get the string labels from the unique values in y
else:
self.classes_ = np.asarray([str(c) for c in labels])
# Create a map of class labels to colors
color_values = resolve_colors(
n_colors=len(self.classes_), colormap=self.colormap, colors=self.colors
)
self._colors = dict(zip(self.classes_, color_values))
self._label_encoder = dict(zip(labels, self.classes_))
# Compute target range if colors are continuous
elif self._target_color_type == TargetType.CONTINUOUS:
y = np.asarray(y)
self.range_ = (y.min(), y.max())
if self.colormap is None:
self.colormap = palettes.DEFAULT_SEQUENCE
# TODO: allow for Yellowbrick palettes here as well
self._colors = mpl.cm.get_cmap(self.colormap)
# If this exception is raised a developer error has occurred because
# unknown types should have errored when the type was determined.
else:
raise YellowbrickValueError(
"unknown target color type '{}'".format(self._target_color_type)
)
# NOTE: cannot call draw in fit to support data transformers
return self
def _determine_target_color_type(self, y):
"""
Determines the target color type from the vector y as follows:
- if y is None: only a single color is used
- if target is auto: determine if y is continuous or discrete
- otherwise specify supplied target type
This property will be used to compute the colors for each point.
"""
if y is None:
self._target_color_type = TargetType.SINGLE
elif self.target_type == TargetType.AUTO:
self._target_color_type = target_color_type(y)
else:
self._target_color_type = TargetType(self.target_type)
# Ensures that target is either SINGLE, DISCRETE or CONTINUOUS before continuing
if (
self._target_color_type == TargetType.AUTO
or self._target_color_type == TargetType.UNKNOWN
):
raise YellowbrickValueError(
(
"could not determine target color type " "from target='{}' to '{}'"
).format(self.target_type, self._target_color_type)
)
def get_target_color_type(self):
"""
Returns the computed target color type if fitted or specified by the user.
"""
if self._target_color_type is None:
raise NotFitted("unknown target color type on unfitted visualizer")
return self._target_color_type
def get_colors(self, y):
"""
Returns the color for the specified value(s) of y based on the learned
colors property for any specified target type.
Parameters
----------
y : array-like
The values of y to get the associated colors for.
Returns
-------
colors : list
Returns a list of colors for each value in y.
"""
if self._colors is None:
raise NotFitted("cannot determine colors on unfitted visualizer")
if self._target_color_type == TargetType.SINGLE:
return [self._colors] * len(y)
if self._target_color_type == TargetType.DISCRETE:
try:
# Use the label encoder to get the class name (or use the value
# if the label is not mapped in the encoder) then use the class
# name to get the color from the color map.
return [self._colors[self._label_encoder.get(yi, yi)] for yi in y]
except KeyError:
unknown = set(y) - set(self._label_encoder.keys())
unknown = ", ".join(["'{}'".format(uk) for uk in unknown])
raise YellowbrickKeyError(
"could not determine color for classes {}".format(unknown)
)
if self._target_color_type == TargetType.CONTINUOUS:
# Normalize values into target range and compute colors from colormap
norm = Normalize(*self.range_)
return self._colors(norm(y))
# This is a developer error, we should never get here!
raise YellowbrickValueError(
"unknown target color type '{}'".format(self._target_color_type)
)
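# --- Illustrative usage sketch (not part of Yellowbrick) ---------------------
# A minimal, self-contained demonstration of the discrete color-mapping logic
# implemented by fit()/get_colors() above: unique target values are encoded to
# string class names and each class name is mapped to a color. The palette
# below is an assumption chosen for the demo; Yellowbrick derives its palette
# via resolve_colors().
def _demo_discrete_color_mapping():
    import numpy as np
    y = np.array([0, 1, 2, 1, 0])
    color_values = ["C0", "C1", "C2"]            # assumed palette
    classes_ = np.asarray([str(c) for c in np.unique(y)])
    colors = dict(zip(classes_, color_values))   # class name -> color
    encoder = dict(zip(np.unique(y), classes_))  # raw label -> class name
    # Resolve one color per observation, mirroring get_colors() above
    return [colors[encoder[yi]] for yi in y]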
| apache-2.0 |
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC | multimodal_fusion/multimodal.py | 1 | 6647 | import random
import time
import itertools
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.preprocessing import sequence
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, LSTM, Input, Lambda, TimeDistributed, Merge
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.layers.wrappers import Bidirectional
from keras import backend as K
from keras.optimizers import RMSprop, Adam, Nadam
import keras.callbacks
from keras.models import load_model
from keras.models import model_from_json
from keras.layers.noise import GaussianNoise
from keras.regularizers import l1,l2
from keras.constraints import maxnorm
from keras import layers
from keras.initializers import RandomUniform
from data_generator import DataGenerator
from losses import ctc_lambda_func
def layer_trainable(l,
freeze,
verbose=False,
bidir_fix=True):
"""
    Freeze (or unfreeze) a single layer.
    The Bidirectional wrapper does not reliably propagate its `trainable`
    flag to the wrapped layers, so for Bidirectional layers the forward and
    backward sub-layers are set explicitly as a workaround.
    """
    l.trainable = not freeze  # freeze=True means the layer must not be trained
if bidir_fix:
if type(l) == Bidirectional:
l.backward_layer.trainable = not freeze
l.forward_layer.trainable = not freeze
if verbose:
if freeze:
action='Froze'
else :
action='Unfroze'
print("{} {}".format(action, l.name))
def build_model(maxlen,
numfeats_speech,
numfeats_skeletal,
nb_classes,
lab_seq_len):
"""
"""
K.set_learning_phase(1)
skeletal_model_file = '../skeletal_network/sk_ctc_lstm_model.json'
skeletal_weights = '../skeletal_network/sk_ctc_lstm_weights_best.h5'
speech_model_file = '../audio_network/sp_ctc_lstm_model.json'
speech_weights = '../audio_network/sp_ctc_lstm_weights_best.h5'
json_file = open(skeletal_model_file, 'r')
skeletal_model_json = json_file.read()
json_file.close()
skeletal_model = model_from_json(skeletal_model_json)
skeletal_model.load_weights(skeletal_weights)
json_file = open(speech_model_file, 'r')
speech_model_json = json_file.read()
json_file.close()
speech_model = model_from_json(speech_model_json)
speech_model.load_weights(speech_weights)
uni_initializer = RandomUniform(minval=-0.05,
maxval=0.05,
seed=47)
input_shape_a = (maxlen, numfeats_speech)
input_shape_s = (maxlen, numfeats_skeletal)
input_data_a = Input(name='the_input_audio',
shape=input_shape_a,
dtype='float32')
input_data_s = Input(name='the_input_skeletal',
shape=input_shape_s,
dtype='float32')
input_noise_a = GaussianNoise(stddev=0.5,
name='gaussian_noise_a')(input_data_a)
input_noise_s = GaussianNoise(stddev=0.0,
name='gaussian_noise_s')(input_data_s)
blstm_1_a = speech_model.layers[2](input_noise_a)
blstm_2_a = speech_model.layers[3](blstm_1_a)
res_a_1 = layers.add([blstm_1_a, blstm_2_a],
name='speech_residual')
blstm_1_s = skeletal_model.layers[2](input_noise_s)
blstm_2_s = skeletal_model.layers[3](blstm_1_s)
res_s_1 = layers.add([blstm_1_s, blstm_2_s],
name='skeletal_residual')
model_a = Model(input=[input_data_a],
output=res_a_1)
model_a.layers[2].name='speech_blstm_1'
model_a.layers[3].name='speech_blstm_2'
model_s = Model(input=[input_data_s],
output=res_s_1)
model_s.layers[2].name='skeletal_blstm_1'
model_s.layers[3].name='skeletal_blstm_2'
# attempt to freeze all Bidirectional layers.
# Bidirectional wrapper layer is buggy so we need to freeze the weights this way.
frozen_types = [Bidirectional]
# Go through layers for both networks and freeze the weights of Bidirectional layers.
for l_a,l_s in zip(model_a.layers,model_s.layers):
if len(l_a.trainable_weights):
if type(l_a) in frozen_types:
layer_trainable(l_a,
freeze=True,
verbose=True)
if len(l_s.trainable_weights):
if type(l_s) in frozen_types:
layer_trainable(l_s,
freeze=True,
verbose=True)
model_a.summary()
model_s.summary()
merged = Merge([model_a, model_s],
mode='concat')([res_a_1,res_s_1])
lstm_3 = Bidirectional(LSTM(100,
name='blstm_2',
activation='tanh',
recurrent_activation='hard_sigmoid',
recurrent_dropout=0.0,
dropout=0.5,
kernel_constraint=maxnorm(3),
kernel_initializer=uni_initializer,
return_sequences=True),
merge_mode='concat')(merged)
dropout_3 = Dropout(0.5,
name='dropout_layer_3')(lstm_3)
inner = Dense(nb_classes,
name='dense_1',
kernel_initializer=uni_initializer)(dropout_3)
y_pred = Activation('softmax',
name='softmax')(inner)
Model(input=[input_data_a,input_data_s],
output=y_pred).summary()
labels = Input(name='the_labels',
shape=[lab_seq_len],
dtype='float32')
input_length = Input(name='input_length',
shape=[1],
dtype='int64')
label_length = Input(name='label_length',
shape=[1],
dtype='int64')
loss_out = Lambda(ctc_lambda_func,
output_shape=(1,),
name="ctc")([y_pred, labels, input_length, label_length])
model = Model(input=[input_data_a,input_data_s, labels, input_length, label_length],
output=[loss_out])
adam = Adam(lr=0.0001,
clipvalue=0.5,
decay=1e-5)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
optimizer=adam)
return model
if __name__ == '__main__':
minibatch_size = 2
val_split = 0.2
maxlen = 1900
nb_classes = 22
nb_epoch = 500
numfeats_speech = 39
numfeats_skeletal = 20
dataset='train'
data_gen = DataGenerator(minibatch_size=minibatch_size,
numfeats_skeletal=numfeats_skeletal,
numfeats_speech=numfeats_speech,
maxlen=maxlen,
dataset=dataset,
val_split=val_split,
nb_classes=nb_classes)
lab_seq_len = data_gen.absolute_max_sequence_len
model = build_model(maxlen,
numfeats_speech,
numfeats_skeletal,
nb_classes,
lab_seq_len)
earlystopping = EarlyStopping(monitor='val_loss',
patience=20,
verbose=1)
filepath="multimodal_ctc_lstm_weights_best.h5"
checkpoint = ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=True,
mode='auto')
    print('Start training.')
start_time = time.time()
model.fit_generator(generator=data_gen.next_train(),
steps_per_epoch=(data_gen.get_size(train=True)/minibatch_size),
epochs=nb_epoch,
validation_data=data_gen.next_val(),
validation_steps=(data_gen.get_size(train=False)/minibatch_size),
callbacks=[checkpoint, data_gen])
end_time = time.time()
print "--- Training time: %s seconds ---" % (end_time - start_time)
| mit |
google/lasr | third_party/PerceptualSimilarity/perceptual_loss.py | 1 | 2966 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
from models import dist_model
class PerceptualLoss(torch.nn.Module):
def __init__(self, model='net-lin', net='vgg', use_gpu=True): # VGG using our perceptually-learned weights (LPIPS metric)
# def __init__(self, model='net', net='vgg', use_gpu=True): # "default" way of using VGG
        super(PerceptualLoss, self).__init__()
        print('Setting up Perceptual loss..')
self.model = dist_model.DistModel()
        self.model.initialize(model=model, net=net, use_gpu=use_gpu)
print('Done')
def forward(self, pred, target, normalize=False):
"""
Pred and target are Variables.
If normalize is on, scales images between [-1, 1]
Assumes the inputs are in range [0, 1].
"""
if normalize:
target = 2 * target - 1
pred = 2 * pred - 1
dist = self.model.forward_pair(target, pred)
return dist
if __name__ == '__main__':
import scipy
import numpy as np
ref_path = './imgs/ex_ref.png'
pred_path = './imgs/ex_p0.png'
ref_img = scipy.misc.imread(ref_path).transpose(2, 0, 1) / 255.
pred_img = scipy.misc.imread(pred_path).transpose(2, 0, 1) / 255.
# Torchify
ref = Variable(torch.FloatTensor(ref_img).cuda(), requires_grad=False)
pred = Variable(torch.FloatTensor(pred_img).cuda())
# 1 x 3 x H x W
ref = ref.unsqueeze(0)
pred = pred.unsqueeze(0)
loss_fn = PerceptualLoss()
dist = loss_fn.forward(pred, ref, normalize=True)
# print('Dist %.3f' % dist.data[0].cpu().numpy())
# As optimization, test backprop
class PerceptModel(torch.nn.Module):
def __init__(self):
super(PerceptModel, self).__init__()
self.pred = torch.nn.Parameter(pred.data)
def forward(self):
return self.pred
model = PerceptModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
import matplotlib.pyplot as plt
plt.ion()
fig = plt.figure(1)
ax = fig.add_subplot(131)
ax.imshow(ref_img.transpose(1, 2, 0))
ax.set_title('reference')
ax = fig.add_subplot(133)
ax.imshow(pred_img.transpose(1, 2, 0))
ax.set_title('orig pred')
for i in range(1000):
optimizer.zero_grad()
dist = loss_fn.forward(model.forward(), ref, normalize=True)
dist.backward()
if i % 10 == 0:
print('iter %d, dist %.3g' % (i, dist.view(-1).data.cpu().numpy()[0]))
pred_img = model.pred[0].data.cpu().numpy().transpose(1, 2, 0)
ax = fig.add_subplot(132)
ax.imshow(np.clip(pred_img, 0, 1))
ax.set_title('iter %d, dist %.3g' % (i, dist.view(-1).data.cpu().numpy()[0]))
plt.pause(5e-2)
optimizer.step()
# from IPython import embed; embed()
# import ipdb; ipdb.set_trace()
| apache-2.0 |
thientu/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck <[email protected]>
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
        different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
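# --- Illustrative sketch (not part of scikit-learn) --------------------------
# A self-contained demonstration of the weighted prediction performed above:
# with weights='distance' each neighbor contributes 1/d, and the prediction is
# the weighted average of the neighbors' targets. The numbers are made up.
def _demo_inverse_distance_weighting():
    import numpy as np
    neigh_dist = np.array([[0.5, 1.0, 2.0]])     # distances to 3 neighbors
    neigh_targets = np.array([[2.0, 4.0, 8.0]])  # their target values
    weights = 1.0 / neigh_dist
    # same num / denom computation as in KNeighborsRegressor.predict above
    return np.sum(neigh_targets * weights, axis=1) / np.sum(weights, axis=1)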
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
solashirai/edx-platform | lms/djangoapps/course_blocks/usage_info.py | 89 | 1246 | """
Declares CourseUsageInfo class to be used by the transform method in
Transformers.
"""
from lms.djangoapps.courseware.access import _has_access_to_course
class CourseUsageInfo(object):
'''
A class object that encapsulates the course and user context to be
used as currency across block structure transformers, by passing
an instance of it in calls to BlockStructureTransformer.transform
methods.
'''
def __init__(self, course_key, user):
# Course identifier (opaque_keys.edx.keys.CourseKey)
self.course_key = course_key
# User object (django.contrib.auth.models.User)
self.user = user
# Cached value of whether the user has staff access (bool/None)
self._has_staff_access = None
@property
def has_staff_access(self):
'''
Returns whether the user has staff access to the course
associated with this CourseUsageInfo instance.
For performance reasons (minimizing multiple SQL calls), the
value is cached within this instance.
'''
if self._has_staff_access is None:
self._has_staff_access = _has_access_to_course(self.user, 'staff', self.course_key)
return self._has_staff_access
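# --- Illustrative sketch (not part of edx-platform) --------------------------
# The property above is a simple per-instance memoization: compute once, store
# the result, and return the stored value on later accesses. The generic shape
# of that pattern (names below are placeholders):
class _MemoizedExample(object):
    def __init__(self, compute):
        self._compute = compute   # an expensive callable, e.g. a DB query
        self._cached = None
    @property
    def value(self):
        if self._cached is None:
            self._cached = self._compute()
        return self._cached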
| agpl-3.0 |
LACNIC/labs-opendata-datasets | bin/venn_asociados.py | 1 | 2990 | # -*- coding: utf-8 -*-
"""
venn_asociados: calculates and plots a venn diagram showing lacnic member groupings
(c) Carlos M:, [email protected]
"""
import sqlite3 as sq
import os
import matplotlib_venn as mpv
import json
from matplotlib import pyplot as plt
if os.name == "nt":
os.chdir(r"C:\Users\carlos\Dropbox (Personal)\Workspaces\LACNIC-Wksp\70-checkouts\labs-opendata-datasets.git")
elif os.name == "posix":
os.chdir("/Users/carlos/Dropbox (Personal)/Workspaces/LACNIC-Wksp/70-checkouts/labs-opendata-datasets.git")
else:
print("WARN unable to detect operating system")
def runq(type,rir='lacnic'):
# TODO: parametrize date
c=sq.connect("var/netdata-2017-08-23.db")
cur=c.cursor()
r1=cur.execute("select distinct(orgid) from numres where rir='%s' and type='%s' and (status='allocated' or status='assigned')"
% (rir, type) )
rcorgs=[ x[0] for x in r1.fetchall() ]
return rcorgs
# end runq
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
# end default
# end SetEncoder
if __name__ == "__main__":
rsets = { 'ipv4':0, 'ipv6':0, 'asn':0 }
for rc in rsets.keys():
print("get orgs with assigned %s" % (rc) )
rsets[rc] = set(runq(rc))
print("%s: %s" % (rc, len(rsets[rc])) )
# end for
ipv4nipv6 = rsets['ipv4'] & rsets['ipv6']
# print("orgs with both ipv4 and ipv6: %s" % (len(ipv4nipv6) ) )
ipv6only = rsets['ipv6'] - rsets['ipv4']
#print("orgs with ipv6 only: %s" % (len(ipv6only) ) )
s5 = rsets['ipv4'] & rsets['ipv6'] & rsets['asn']
rsets['Orgs with ipv4, ipv6 and asn'] = s5
s2 = (rsets['ipv4'] & rsets['ipv6']) - s5
rsets['Orgs with ipv4 and ipv6 but no asn'] = s2
s4 = (rsets['ipv4'] & rsets['asn']) - s5
rsets['Orgs with ipv4 and asn but no ipv6'] = s4
s6 = (rsets['ipv6'] & rsets['asn']) - s5
rsets['Orgs with ipv6 and asn but no ipv4'] = s6
s1 = rsets['ipv4'] - s4 -s5 - s2
rsets['Orgs with ipv4 only'] = s1
s3 = rsets['ipv6'] - s6 - s5 - s2
rsets['Orgs with ipv6 only'] = s3
s7 = rsets['asn'] - s5 - s4 - s6
rsets['Orgs with asn only'] = s7
# mpv.venn3(
# subsets = (len(s1),len(s2),len(s3),len(s4),len(s5),len(s6),len(s7) ),
# set_labels = ('ipv4','asn','ipv6' )
# )
# print text info
for rc in rsets.keys():
print("%s: %s" % (rc,len(rsets[rc])) )
# draw plot
plt.figure(figsize=(10,10))
plt.title("Asociados LACNIC", fontsize=24)
mpv.venn3([rsets['ipv4'], rsets['ipv6'], rsets['asn']], ('IPv4', 'IPv6', 'ASN'))
plt.show()
# print json
fp = open("var/venn-asociados-20170823.json", "w")
# fp = io.FileIO("var/venn-asociados-20170815.json","w")
json.dump(rsets, fp, cls=SetEncoder, indent=4)
    fp.close()
| bsd-2-clause |
Fireblend/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 142 | 22295 | """
Todo: cross-check the F-value with stats model
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
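# --- Illustrative sketch (not part of scikit-learn) --------------------------
# The test above relies on the Benjamini-Hochberg guarantee FDR <= alpha. A
# minimal version of the BH selection rule on a vector of p-values: keep every
# hypothesis up to the largest i (1-based) with p_(i) <= alpha * i / n.
def _demo_benjamini_hochberg(pvalues, alpha):
    pvalues = np.asarray(pvalues)
    n = len(pvalues)
    order = np.argsort(pvalues)
    sorted_p = pvalues[order]
    below = sorted_p <= alpha * np.arange(1, n + 1) / n
    selected = np.zeros(n, dtype=bool)
    if below.any():
        cutoff = np.max(np.nonzero(below)[0])    # largest passing index
        selected[order[:cutoff + 1]] = True
    return selected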
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([0, 1, 2])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# rejects all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
ningchi/scikit-learn | examples/neighbors/plot_classification.py | 285 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
DistrictDataLabs/yellowbrick | yellowbrick/download.py | 1 | 3026 | # yellowbrick.download
# Downloads the example datasets for running the examples.
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Created: Wed May 18 11:54:45 2016 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: download.py [1f73d2b] [email protected] $
"""
Downloads the example datasets for running the examples.
"""
##########################################################################
## Imports
##########################################################################
import os
import argparse
from yellowbrick.datasets import get_data_home
from yellowbrick.datasets.loaders import DATASETS
from yellowbrick.datasets.download import download_data
from yellowbrick.datasets.path import cleanup_dataset
##########################################################################
## Functions
##########################################################################
def download_all(data_home=None, replace=False):
"""
Downloads all the example datasets to the data directory specified by
``get_data_home``. This function ensures that all datasets are available
for use with the examples.
"""
for _, meta in DATASETS.items():
download_data(
meta["url"], meta["signature"], data_home=data_home, replace=replace
)
print(
"Downloaded {} datasets to {}".format(len(DATASETS), get_data_home(data_home))
)
def cleanup_all(data_home=None):
"""
Cleans up all the example datasets in the data directory specified by
``get_data_home`` either to clear up disk space or start from fresh.
"""
removed = 0
for name, meta in DATASETS.items():
_, ext = os.path.splitext(meta["url"])
removed += cleanup_dataset(name, data_home=data_home, ext=ext)
print(
"Removed {} fixture objects from {}".format(removed, get_data_home(data_home))
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Yellowbrick data downloader",
epilog="for troubleshooting please visit our GitHub issues",
)
parser.add_argument(
"-c",
"--cleanup",
action="store_true",
default=False,
help="cleanup any existing datasets before download",
)
parser.add_argument(
"--no-download",
action="store_true",
default=False,
help="prevent new data from being downloaded",
)
parser.add_argument(
"-f",
"--overwrite",
action="store_true",
default=False,
help="overwrite any existing data with new download",
)
parser.add_argument(
"data_home",
default=None,
nargs="?",
help="specify the data download location or set $YELLOWBRICK_DATA",
)
args = parser.parse_args()
if args.cleanup:
cleanup_all(data_home=args.data_home)
if not args.no_download:
download_all(data_home=args.data_home, replace=args.overwrite)
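    # Example invocations (paths are hypothetical), using only the flags
    # defined above:
    #   python -m yellowbrick.download                 # download to the default data home
    #   python -m yellowbrick.download -f ~/yb-data    # overwrite into a custom directory
    #   python -m yellowbrick.download --cleanup --no-download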
| apache-2.0 |
qiime2/q2-diversity | q2_diversity/_ordination.py | 1 | 3764 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from sklearn.manifold import TSNE
import skbio.stats.ordination
import pandas as pd
import numpy as np
import umap as up
def pcoa(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = None) -> skbio.OrdinationResults:
if number_of_dimensions is None:
# calculate full decomposition using eigh
return skbio.stats.ordination.pcoa(distance_matrix, method='eigh',
inplace=False)
else:
# calculate the decomposition only for the `number_of_dimensions`
# using fast heuristic eigendecomposition (fsvd)
return skbio.stats.ordination.pcoa(
distance_matrix, method='fsvd',
number_of_dimensions=number_of_dimensions,
inplace=False)
def pcoa_biplot(pcoa: skbio.OrdinationResults,
features: pd.DataFrame) -> skbio.OrdinationResults:
return skbio.stats.ordination.pcoa_biplot(pcoa, features)
def tsne(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = 2,
perplexity: float = 25.0,
n_iter: int = 1000,
learning_rate: float = 200.0,
early_exaggeration: float = 12.0,
random_state: int = None) -> skbio.OrdinationResults:
data = distance_matrix.data
ids = distance_matrix.ids
tsne = TSNE(number_of_dimensions, perplexity=perplexity,
learning_rate=learning_rate,
n_iter=n_iter,
early_exaggeration=early_exaggeration,
random_state=random_state).fit_transform(data)
if number_of_dimensions == 2:
number_of_dimensions = 3
add_zeros = np.zeros((tsne.shape[0], 1), dtype=np.int64)
tsne = np.append(tsne, add_zeros, axis=1)
axis_labels = ["TSNE%d" % i for i in range(1, number_of_dimensions + 1)]
eigenvalues = [0 for i in axis_labels]
return skbio.OrdinationResults(
short_method_name="T-SNE",
long_method_name="t-distributed stochastic neighbor embedding",
eigvals=pd.Series(eigenvalues, index=axis_labels),
proportion_explained=pd.Series(None, index=axis_labels),
samples=pd.DataFrame(tsne, index=ids, columns=axis_labels),
)
def umap(distance_matrix: skbio.DistanceMatrix,
number_of_dimensions: int = 2,
n_neighbors: int = 15,
min_dist: float = 0.4,
random_state: int = None) -> skbio.OrdinationResults:
data = distance_matrix.data
ids = distance_matrix.ids
umap_results = up.UMAP(n_components=number_of_dimensions,
n_neighbors=n_neighbors,
min_dist=min_dist,
random_state=random_state).fit_transform(data)
if number_of_dimensions == 2:
number_of_dimensions = 3
add_zeros = np.zeros((umap_results.shape[0], 1), dtype=np.int64)
umap_results = np.append(umap_results, add_zeros, axis=1)
axis_labels = ["UMAP%d" % i for i in range(1, number_of_dimensions + 1)]
eigenvalues = [0 for i in axis_labels]
return skbio.OrdinationResults(
short_method_name="UMAP",
long_method_name="Uniform Manifold Approximation and Projection",
eigvals=pd.Series(eigenvalues, index=axis_labels),
proportion_explained=pd.Series(None, index=axis_labels),
samples=pd.DataFrame(umap_results, index=ids, columns=axis_labels),
)
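# --- Illustrative sketch (not part of q2-diversity) --------------------------
# How these ordination functions might be called directly, outside of the
# QIIME 2 framework, assuming scikit-bio is installed. The tiny symmetric
# distance matrix below is made up purely for demonstration.
def _demo_pcoa():
    data = np.array([[0.0, 0.5, 1.0],
                     [0.5, 0.0, 0.7],
                     [1.0, 0.7, 0.0]])
    dm = skbio.DistanceMatrix(data, ids=['s1', 's2', 's3'])
    return pcoa(dm, number_of_dimensions=2)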
| bsd-3-clause |
ales-erjavec/orange | Orange/OrangeWidgets/Visualize/OWMosaicDisplay.py | 6 | 49381 | """
<name>Mosaic Display</name>
<description>Shows a mosaic display.</description>
<contact>Gregor Leban ([email protected])</contact>
<icon>icons/MosaicDisplay.svg</icon>
<priority>4100</priority>
"""
# OWMosaicDisplay.py
#
from OWWidget import *
import OWGUI
from OWMosaicOptimization import *
from OWTools import getHtmlCompatibleString
from math import sqrt, floor, ceil, pow
import operator
from orngScaleData import getVariableValuesSorted, getVariableValueIndices
from OWQCanvasFuncts import *
import OWColorPalette
import OWDlgs
from orngVisFuncts import permutations
from copy import copy
PEARSON = 0
CLASS_DISTRIBUTION = 1
BOTTOM = 0
LEFT = 1
TOP = 2
RIGHT = 3
class SelectionRectangle(QGraphicsRectItem):
pass
class MosaicSceneView(QGraphicsView):
def __init__(self, widget, *args):
apply(QGraphicsView.__init__,(self,) + args)
self.widget = widget
self.bMouseDown = False
self.mouseDownPosition = QPoint(0,0)
self.tempRect = None
# mouse button was pressed
def mousePressEvent(self, ev):
QGraphicsView.mousePressEvent(self, ev)
self.mouseDownPosition = QPoint(ev.pos().x(), ev.pos().y())
self.bMouseDown = True
self.mouseMoveEvent(ev)
# mouse button was pressed and mouse is moving ######################
def mouseMoveEvent(self, ev):
QGraphicsView.mouseMoveEvent(self, ev)
if ev.button() == Qt.RightButton:
return
if not self.bMouseDown:
if self.tempRect:
self.scene().removeItem(self.tempRect)
self.tempRect = None
else:
if not self.tempRect:
self.tempRect = SelectionRectangle(None, self.scene())
rect = QRectF(min(self.mouseDownPosition.x(), ev.pos().x()), min (self.mouseDownPosition.y(), ev.pos().y()), max(abs(self.mouseDownPosition.x() - ev.pos().x()),1), max(abs(self.mouseDownPosition.y() - ev.pos().y()),1))
self.tempRect.setRect(rect)
# mouse button was released #########################################
def mouseReleaseEvent(self, ev):
self.bMouseDown = False
if ev.button() == Qt.RightButton:
self.widget.removeLastSelection()
elif self.tempRect:
self.widget.addSelection(self.tempRect)
self.scene().removeItem(self.tempRect)
self.tempRect = None
class OWMosaicDisplay(OWWidget):
settingsList = ["horizontalDistribution", "showAprioriDistributionLines", "showAprioriDistributionBoxes",
"horizontalDistribution", "useBoxes", "interiorColoring", "boxSize", "colorSettings", "selectedSchemaIndex", "cellspace",
"showSubsetDataBoxes", "removeUnusedValues"]
contextHandlers = {"": DomainContextHandler("", ["attr1", "attr2", "attr3", "attr4", "manualAttributeValuesDict"], loadImperfect = 0)}
interiorColoringOpts = ["Standardized (Pearson) residuals", "Class distribution"]
subboxesOpts = ["Expected class distribution", "Apriori class distribution"]
def __init__(self,parent=None, signalManager = None):
OWWidget.__init__(self, parent, signalManager, "Mosaic display", True, True)
#set default settings
self.data = None
self.unprocessedSubsetData = None
self.subsetData = None
self.names = [] # class values
self.inputs = [("Data", ExampleTable, self.setData, Default), ("Data Subset", ExampleTable, self.setSubsetData)]
self.outputs = [("Selected Data", ExampleTable), ("Learner", orange.Learner)]
#load settings
self.colorSettings = None
self.selectedSchemaIndex = 0
self.interiorColoring = 0
self.cellspace = 4
self.showAprioriDistributionBoxes = 1
self.useBoxes = 1
self.showSubsetDataBoxes = 1
self.horizontalDistribution = 0
self.showAprioriDistributionLines = 0
self.boxSize = 5
self.exploreAttrPermutations = 0
self.attr1 = ""
self.attr2 = ""
self.attr3 = ""
self.attr4 = ""
self.attributeNameOffset = 30
self.attributeValueOffset = 15
self.residuals = [] # residual values if the residuals are visualized
self.aprioriDistributions = []
self.colorPalette = None
self.permutationDict = {}
self.manualAttributeValuesDict = {}
self.conditionalDict = None
self.conditionalSubsetDict = None
self.activeRule = None
self.removeUnusedValues = 0
self.selectionRectangle = None
self.selectionConditionsHistorically = []
self.selectionConditions = []
# color paletes for visualizing pearsons residuals
#self.blueColors = [QColor(255, 255, 255), QColor(117, 149, 255), QColor(38, 43, 232), QColor(1,5,173)]
self.blueColors = [QColor(255, 255, 255), QColor(210, 210, 255), QColor(110, 110, 255), QColor(0,0,255)]
self.redColors = [QColor(255, 255, 255), QColor(255, 200, 200), QColor(255, 100, 100), QColor(255, 0, 0)]
self.loadSettings()
self.tabs = OWGUI.tabWidget(self.controlArea)
self.GeneralTab = OWGUI.createTabPage(self.tabs, "Main")
self.SettingsTab = OWGUI.createTabPage(self.tabs, "Settings")
self.canvas = QGraphicsScene()
self.canvasView = MosaicSceneView(self, self.canvas, self.mainArea)
self.mainArea.layout().addWidget(self.canvasView)
self.canvasView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvasView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.canvasView.setRenderHint(QPainter.Antialiasing)
#self.canvasView.setAlignment(Qt.AlignLeft | Qt.AlignTop)
#GUI
#add controls to self.controlArea widget
#self.controlArea.setMinimumWidth(235)
texts = ["1st Attribute", "2nd Attribute", "3rd Attribute", "4th Attribute"]
for i in range(1,5):
box = OWGUI.widgetBox(self.GeneralTab, texts[i-1], orientation = "horizontal")
combo = OWGUI.comboBox(box, self, "attr" + str(i), None, callback = self.updateGraphAndPermList, sendSelectedValue = 1, valueType = str)
butt = OWGUI.button(box, self, "", callback = self.orderAttributeValues, tooltip = "Change the order of attribute values", debuggingEnabled = 0)
butt.setFixedSize(26, 24)
butt.setCheckable(1)
butt.setIcon(QIcon(os.path.join(self.widgetDir, "icons/Dlg_sort.png")))
setattr(self, "sort"+str(i), butt)
setattr(self, "attr" + str(i)+ "Combo", combo)
self.optimizationDlg = OWMosaicOptimization(self, self.signalManager)
optimizationButtons = OWGUI.widgetBox(self.GeneralTab, "Dialogs", orientation = "horizontal")
OWGUI.button(optimizationButtons, self, "VizRank", callback = self.optimizationDlg.reshow, debuggingEnabled = 0, tooltip = "Find attribute combinations that will separate different classes as clearly as possible.")
self.collapsableWBox = OWGUI.collapsableWidgetBox(self.GeneralTab, "Explore Attribute Permutations", self, "exploreAttrPermutations", callback = self.permutationListToggle)
self.permutationList = OWGUI.listBox(self.collapsableWBox, self, callback = self.setSelectedPermutation)
#self.permutationList.hide()
self.GeneralTab.layout().addStretch(100)
# ######################
# SETTINGS TAB
# ######################
box5 = OWGUI.widgetBox(self.SettingsTab, "Colors in Cells Represent...", addSpace = 1)
OWGUI.comboBox(box5, self, "interiorColoring", None, items = self.interiorColoringOpts, callback = self.updateGraph)
#box5.setSizePolicy(QSizePolicy(QSizePolicy.Minimum , QSizePolicy.Fixed ))
box = OWGUI.widgetBox(self.SettingsTab, "Visual Settings", addSpace = 1)
OWGUI.hSlider(box, self, 'cellspace', label = "Cell distance: ", minValue=1, maxValue=15, step=1, callback = self.updateGraph, tooltip = "What is the minimum distance between two rectangles in the plot?")
OWGUI.checkBox(box, self, "removeUnusedValues", "Remove unused attribute values", tooltip = "Do you want to remove unused attribute values?\nThis setting will not be considered until new data is received.")
self.box6 = OWGUI.widgetBox(self.SettingsTab, "Cell Distribution Settings", addSpace = 1)
OWGUI.comboBox(self.box6, self, 'horizontalDistribution', items = ["Show Distribution Vertically", "Show Distribution Horizontally"], tooltip = "Do you wish to see class distribution drawn horizontally or vertically?", callback = self.updateGraph)
OWGUI.checkBox(self.box6, self, 'showAprioriDistributionLines', 'Show apriori distribution with lines', callback = self.updateGraph, tooltip = "Show the lines that represent the apriori class distribution")
self.box8 = OWGUI.widgetBox(self.SettingsTab, "Boxes in Cells", addSpace = 1)
OWGUI.hSlider(self.box8, self, 'boxSize', label = "Size: ", minValue=1, maxValue=15, step=1, callback = self.updateGraph, tooltip = "What is the size of the boxes on the left and right edge of each cell?")
OWGUI.checkBox(self.box8, self, 'showSubsetDataBoxes', 'Show class distribution of subset data', callback = self.updateGraph, tooltip = "Show small boxes at right (or bottom) edge of cells to represent class distribution of examples from example subset input.")
cb = OWGUI.checkBox(self.box8, self, 'useBoxes', 'Use boxes on left to show...', callback = self.updateGraph, tooltip = "Show small boxes at left (or top) edge of cells to represent additional information.")
indBox = OWGUI.indentedBox(self.box8, sep=OWGUI.checkButtonOffsetHint(cb))
OWGUI.comboBox(indBox, self, 'showAprioriDistributionBoxes', items = self.subboxesOpts, tooltip = "Show additional boxes for each mosaic cell representing:\n - expected class distribution (assuming independence between attributes)\n - apriori class distribution (based on all examples).", callback = self.updateGraph)
hbox = OWGUI.widgetBox(self.SettingsTab, "Colors", addSpace = 1)
OWGUI.button(hbox, self, "Set Colors", self.setColors, tooltip = "Set the color palette for class values", debuggingEnabled = 0)
#self.box6.setSizePolicy(QSizePolicy(QSizePolicy.Minimum , QSizePolicy.Fixed ))
self.SettingsTab.layout().addStretch(1)
self.connect(self.graphButton, SIGNAL("clicked()"), self.saveToFileCanvas)
self.icons = self.createAttributeIconDict()
self.resize(830, 550)
self.VizRankLearner = MosaicTreeLearner(self.optimizationDlg)
self.send("Learner", self.VizRankLearner)
self.wdChildDialogs = [self.optimizationDlg] # used when running widget debugging
self.collapsableWBox.updateControls()
dlg = self.createColorDialog()
self.colorPalette = dlg.getDiscretePalette("discPalette")
self.selectionColorPalette = [QColor(*col) for col in OWColorPalette.defaultRGBColors]
def sendReport(self):
self.reportSettings("Settings", [("Color in cells", self.interiorColoringOpts[self.interiorColoring]),
("Subboxes", self.subboxesOpts[self.useBoxes])])
self.reportImage(lambda *x: OWDlgs.OWChooseImageSizeDlg(self.canvas).saveImage(*x))
def permutationListToggle(self):
if self.exploreAttrPermutations:
self.updateGraphAndPermList()
def setSelectedPermutation(self):
newRow = self.permutationList.currentRow()
if self.permutationList.count() > 0 and self.bestPlacements and newRow < len(self.bestPlacements):
self.removeAllSelections()
val, attrList, valueOrder = self.bestPlacements[newRow]
if len(attrList) > 0: self.attr1 = attrList[0]
if len(attrList) > 1: self.attr2 = attrList[1]
if len(attrList) > 2: self.attr3 = attrList[2]
if len(attrList) > 3: self.attr4 = attrList[3]
self.updateGraph(customValueOrderDict = dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))]))
def orderAttributeValues(self):
attr = None
if self.sort1.isChecked(): attr = self.attr1
elif self.sort2.isChecked(): attr = self.attr2
elif self.sort3.isChecked(): attr = self.attr3
elif self.sort4.isChecked(): attr = self.attr4
if self.data and attr != "" and attr != "(None)":
dlg = SortAttributeValuesDlg(self, attr, self.manualAttributeValuesDict.get(attr, None) or getVariableValuesSorted(self.data.domain[attr]))
if dlg.exec_() == QDialog.Accepted:
self.manualAttributeValuesDict[attr] = [str(dlg.attributeList.item(i).text()) for i in range(dlg.attributeList.count())]
for control in [self.sort1, self.sort2, self.sort3, self.sort4]:
control.setChecked(0)
self.updateGraph()
# initialize combo boxes with discrete attributes
def initCombos(self, data):
for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
combo.clear()
if data is None: return
self.attr2Combo.addItem("(None)")
self.attr3Combo.addItem("(None)")
self.attr4Combo.addItem("(None)")
for attr in data.domain:
if attr.varType == orange.VarTypes.Discrete:
for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
combo.addItem(self.icons[orange.VarTypes.Discrete], attr.name)
if self.attr1Combo.count() > 0:
self.attr1 = str(self.attr1Combo.itemText(0))
self.attr2 = str(self.attr2Combo.itemText(0 + 2*(self.attr2Combo.count() > 2)))
self.attr3 = str(self.attr3Combo.itemText(0))
self.attr4 = str(self.attr4Combo.itemText(0))
# when we resize the widget, we have to redraw the data
def resizeEvent(self, e):
OWWidget.resizeEvent(self,e)
self.updateGraph()
def showEvent(self, ev):
OWWidget.showEvent(self, ev)
self.updateGraph()
def closeEvent(self, ce):
self.optimizationDlg.hide()
QDialog.closeEvent(self, ce)
# ------------- SIGNALS --------------------------
# # DATA signal - receive new data and update all fields
def setData(self, data):
self.closeContext()
self.data = None
self.bestPlacements = None
self.manualAttributeValuesDict = {}
self.attributeValuesDict = {}
self.information([0,1,2])
self.data = self.optimizationDlg.setData(data, self.removeUnusedValues)
if self.data:
if data.domain.hasContinuousAttributes():
self.information(0, "Continuous attributes were discretized using entropy discretization.")
if data.domain.classVar and data.hasMissingClasses():
self.information(1, "Examples with missing classes were removed.")
# if self.removeUnusedValues and len(data) != len(self.data):
# self.information(2, "Unused attribute values were removed.")
if self.data.domain.classVar and self.data.domain.classVar.varType == orange.VarTypes.Discrete:
self.interiorColoring = CLASS_DISTRIBUTION
self.colorPalette.setNumberOfColors(len(self.data.domain.classVar.values))
else:
self.interiorColoring = PEARSON
self.initCombos(self.data)
self.openContext("", self.data)
if data and self.unprocessedSubsetData: # if we first received subset data we now have to call setSubsetData to process it
self.setSubsetData(self.unprocessedSubsetData)
self.unprocessedSubsetData = None
def setSubsetData(self, data):
if not self.data:
self.unprocessedSubsetData = data
self.warning(10)
else:
try:
self.subsetData = data.select(self.data.domain)
self.warning(10)
except:
self.subsetData = None
self.warning(10, data and "'Examples' and 'Example Subset' data do not have compatible domains. Unable to draw 'Example Subset' data." or "")
# this is called by OWBaseWidget after setData and setSubsetData are called. this way the graph is updated only once
def handleNewSignals(self):
self.updateGraphAndPermList()
# ------------------------------------------------
def setShownAttributes(self, attrList, **args):
if not attrList: return
self.attr1 = attrList[0]
if len(attrList) > 1: self.attr2 = attrList[1]
else: self.attr2 = "(None)"
if len(attrList) > 2: self.attr3 = attrList[2]
else: self.attr3 = "(None)"
if len(attrList) > 3: self.attr4 = attrList[3]
else: self.attr4 = "(None)"
self.attributeValuesDict = args.get("customValueOrderDict", None)
self.updateGraphAndPermList()
def getShownAttributeList(self):
attrList = [self.attr1, self.attr2, self.attr3, self.attr4]
while "(None)" in attrList: attrList.remove("(None)")
while "" in attrList: attrList.remove("")
return attrList
def updateGraphAndPermList(self, **args):
self.removeAllSelections()
self.permutationList.clear()
if self.exploreAttrPermutations:
attrList = self.getShownAttributeList()
if not getattr(self, "bestPlacements", []) or 0 in [attr in self.bestPlacements[0][1] for attr in attrList]: # we might have bestPlacements for a different set of attributes
self.setStatusBarText("Evaluating different attribute permutations. You can stop evaluation by opening VizRank dialog and pressing 'Stop optimization' button.")
self.bestPlacements = self.optimizationDlg.optimizeCurrentAttributeOrder(attrList, updateGraph = 0)
self.setStatusBarText("")
if self.bestPlacements:
self.permutationList.addItems(["%.2f - %s" % (val, attrs) for (val, attrs, order) in self.bestPlacements])
attrList, valueOrder = self.bestPlacements[0][1], self.bestPlacements[0][2]
self.attributeValuesDict = dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))])
self.updateGraph(**args)
# ############################################################################
# updateGraph - gets called every time the graph has to be updated
def updateGraph(self, data = -1, subsetData = -1, attrList = -1, **args):
# do we want to erase previous diagram?
if args.get("erasePrevious", 1):
for item in self.canvas.items():
if not isinstance(item, SelectionRectangle):
self.canvas.removeItem(item) # remove all canvas items, except SelectionCurves
self.names = []
if data == -1:
data = self.data
if subsetData == -1:
subsetData = self.subsetData
if attrList == -1:
attrList = [self.attr1, self.attr2, self.attr3, self.attr4]
if data is None : return
while "(None)" in attrList: attrList.remove("(None)")
while "" in attrList: attrList.remove("")
if attrList == []:
return
selectList = attrList
if data.domain.classVar:
data = data.select(attrList + [data.domain.classVar])
else:
data = data.select(attrList)
data = orange.Preprocessor_dropMissing(data)
if len(data) == 0:
self.warning(5, "There are no examples with valid values for currently visualized attributes. Unable to visualize.")
return
else:
self.warning(5)
self.aprioriDistributions = []
if self.interiorColoring == PEARSON:
self.aprioriDistributions = [orange.Distribution(attr, data) for attr in attrList]
if args.get("positions"):
xOff, yOff, squareSize = args.get("positions")
else:
# get the maximum width of rectangle
xOff = 50
width = 50
if len(attrList) > 1:
text = OWCanvasText(self.canvas, attrList[1], bold = 1, show = 0)
width = text.boundingRect().width() + 30 + 20
xOff = width
if len(attrList) == 4:
text = OWCanvasText(self.canvas, attrList[3], bold = 1, show = 0)
width += text.boundingRect().width() + 30 + 20
# get the maximum height of rectangle
height = 90
yOff = 40
squareSize = min(self.canvasView.width() - width - 20, self.canvasView.height() - height - 20)
if squareSize < 0: return # canvas is too small to draw rectangles
self.canvasView.setSceneRect(0, 0, self.canvasView.width(), self.canvasView.height())
self.legend = {} # dictionary that tells us, for what attributes did we already show the legend
for attr in attrList:
self.legend[attr] = 0
self.drawnSides = dict([(0,0),(1,0),(2,0),(3,0)])
self.drawPositions = {}
if not getattr(self, "attributeValuesDict", None):
self.attributeValuesDict = self.manualAttributeValuesDict
# compute distributions
self.conditionalDict = self.optimizationDlg.getConditionalDistributions(data, attrList)
self.conditionalDict[""] = len(data)
self.conditionalSubsetDict = None
if subsetData:
self.conditionalSubsetDict = self.optimizationDlg.getConditionalDistributions(subsetData, attrList)
self.conditionalSubsetDict[""] = len(subsetData)
# draw rectangles
self.DrawData(attrList, (xOff, xOff+squareSize), (yOff, yOff+squareSize), 0, "", len(attrList), **args)
if args.get("drawLegend", 1):
self.DrawLegend(data, (xOff, xOff+squareSize), (yOff, yOff+squareSize)) # draw class legend
if args.get("drillUpdateSelection", 1):
self.optimizationDlg.mtUpdateState()
#self.canvas.update()
# ############################################################################
# ############################################################################
## DRAW DATA - draw rectangles for attributes in attrList inside rect (x0,x1), (y0,y1)
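    # Descriptive note: rectangles are drawn recursively. The first attribute in
    # attrList splits the current rectangle along one axis (x for even 'side'
    # values, y for odd ones), with sub-rectangle sizes proportional to the
    # conditional counts in self.conditionalDict; DrawData then recurses on
    # attrList[1:] inside each sub-rectangle until a single attribute is left,
    # at which point addRect draws the cell.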
def DrawData(self, attrList, (x0, x1), (y0, y1), side, condition, totalAttrs, usedAttrs = [], usedVals = [], attrVals = "", **args):
if self.conditionalDict[attrVals] == 0:
self.addRect(x0, x1, y0, y1, "", usedAttrs, usedVals, attrVals = attrVals)
self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, usedAttrs, usedVals, attrVals) # store coordinates for later drawing of labels
return
attr = attrList[0]
edge = len(attrList) * self.cellspace # how much smaller rectangles do we draw
values = self.attributeValuesDict.get(attr, None) or getVariableValuesSorted(self.data.domain[attr])
if side%2: values = values[::-1] # reverse names if necessary
if side%2 == 0: # we are drawing on the x axis
whole = max(0, (x1-x0)-edge*(len(values)-1)) # we remove the space needed for separating different attr. values
if whole == 0: edge = (x1-x0)/float(len(values)-1)
else: # we are drawing on the y axis
whole = max(0, (y1-y0)-edge*(len(values)-1))
if whole == 0: edge = (y1-y0)/float(len(values)-1)
if attrVals == "": counts = [self.conditionalDict[val] for val in values]
else: counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
total = sum(counts)
# if we are visualizing the third attribute and the first attribute has the last value, we have to reverse the order in which the boxes will be drawn
# otherwise, if the last cell, nearest to the labels of the fourth attribute, is empty, we wouldn't be able to position the labels
valRange = range(len(values))
if len(attrList + usedAttrs) == 4 and len(usedAttrs) == 2:
attr1Values = self.attributeValuesDict.get(usedAttrs[0], None) or getVariableValuesSorted(self.data.domain[usedAttrs[0]])
if usedVals[0] == attr1Values[-1]:
valRange = valRange[::-1]
for i in valRange:
start = i*edge + whole * float(sum(counts[:i])/float(total))
end = i*edge + whole * float(sum(counts[:i+1])/float(total))
val = values[i]
htmlVal = getHtmlCompatibleString(val)
if attrVals != "": newAttrVals = attrVals + "-" + val
else: newAttrVals = val
if side % 2 == 0: # if we are moving horizontally
if len(attrList) == 1: self.addRect(x0+start, x0+end, y0, y1, condition + 4*" " + attr + ": <b>" + htmlVal + "</b><br>", usedAttrs + [attr], usedVals + [val], newAttrVals, **args)
else: self.DrawData(attrList[1:], (x0+start, x0+end), (y0, y1), side +1, condition + 4*" " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs, usedAttrs + [attr], usedVals + [val], newAttrVals, **args)
else:
if len(attrList) == 1: self.addRect(x0, x1, y0+start, y0+end, condition + 4*" " + attr + ": <b> " + htmlVal + "</b><br>", usedAttrs + [attr], usedVals + [val], newAttrVals, **args)
else: self.DrawData(attrList[1:], (x0, x1), (y0+start, y0+end), side +1, condition + 4*" " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs, usedAttrs + [attr], usedVals + [val], newAttrVals, **args)
self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, usedAttrs, usedVals, attrVals)
######################################################################
## DRAW TEXT - draw legend for all attributes in attrList and their possible values
def DrawText(self, side, attr, (x0, x1), (y0, y1), totalAttrs, usedAttrs, usedVals, attrVals):
if self.drawnSides[side]: return
# the text on the right will be drawn when we are processing visualization of the last value of the first attribute
if side == RIGHT:
attr1Values = self.attributeValuesDict.get(usedAttrs[0], None) or getVariableValuesSorted(self.data.domain[usedAttrs[0]])
if usedVals[0] != attr1Values[-1]:
return
if not self.conditionalDict[attrVals]:
if not self.drawPositions.has_key(side): self.drawPositions[side] = (x0, x1, y0, y1)
return
else:
if self.drawPositions.has_key(side): (x0, x1, y0, y1) = self.drawPositions[side] # restore the positions where we have to draw the attribute values and attribute name
self.drawnSides[side] = 1
values = self.attributeValuesDict.get(attr, None) or getVariableValuesSorted(self.data.domain[attr])
if side % 2: values = values[::-1]
width = x1-x0 - (side % 2 == 0) * self.cellspace*(totalAttrs-side)*(len(values)-1)
height = y1-y0 - (side % 2 == 1) * self.cellspace*(totalAttrs-side)*(len(values)-1)
#calculate position of first attribute
if side == 0: OWCanvasText(self.canvas, attr, x0+(x1-x0)/2, y1 + self.attributeNameOffset, Qt.AlignCenter, bold = 1)
elif side == 1: OWCanvasText(self.canvas, attr, x0 - self.attributeNameOffset, y0+(y1-y0)/2, Qt.AlignRight | Qt.AlignVCenter, bold = 1)
elif side == 2: OWCanvasText(self.canvas, attr, x0+(x1-x0)/2, y0 - self.attributeNameOffset, Qt.AlignCenter, bold = 1)
else: OWCanvasText(self.canvas, attr, x1 + self.attributeNameOffset, y0+(y1-y0)/2, Qt.AlignLeft | Qt.AlignVCenter, bold = 1)
currPos = 0
if attrVals == "": counts = [self.conditionalDict.get(val, 1) for val in values]
else: counts = [self.conditionalDict.get(attrVals + "-" + val, 1) for val in values]
total = sum(counts)
if total == 0:
counts = [1]*len(values)
total = sum(counts)
for i in range(len(values)):
val = values[i]
perc = counts[i]/float(total)
if side == 0: OWCanvasText(self.canvas, str(val), x0+currPos+width*0.5*perc, y1 + self.attributeValueOffset, Qt.AlignCenter, bold = 0)
elif side == 1: OWCanvasText(self.canvas, str(val), x0-self.attributeValueOffset, y0+currPos+height*0.5*perc, Qt.AlignRight | Qt.AlignVCenter, bold = 0)
elif side == 2: OWCanvasText(self.canvas, str(val), x0+currPos+width*perc*0.5, y0 - self.attributeValueOffset, Qt.AlignCenter, bold = 0)
else: OWCanvasText(self.canvas, str(val), x1+self.attributeValueOffset, y0 + currPos + height*0.5*perc, Qt.AlignLeft | Qt.AlignVCenter, bold = 0)
if side % 2 == 0: currPos += perc*width + self.cellspace*(totalAttrs-side)
else : currPos += perc*height+ self.cellspace*(totalAttrs-side)
    # draw a rectangle, send it to the back, and add it to the rect list
def addRect(self, x0, x1, y0, y1, condition = "", usedAttrs = [], usedVals = [], attrVals = "", **args):
if x0 == x1: x1+=1
if y0 == y1: y1+=1
if x1-x0 + y1-y0 == 2: y1+=1 # if we want to show a rectangle of width and height 1 it doesn't show anything. in such cases we therefore have to increase size of one edge
if args.has_key("selectionDict") and args["selectionDict"].has_key(tuple(usedVals)):
d = 2
OWCanvasRectangle(self.canvas, x0-d, y0-d, x1-x0+1+2*d, y1-y0+1+2*d, penColor = args["selectionDict"][tuple(usedVals)], penWidth = 2, z = -100)
        # if we have selected a rule that contains this combination of attribute values, highlight this rectangle (dashed outline)
if self.activeRule and len(usedAttrs) == len(self.activeRule[0]) and sum([v in usedAttrs for v in self.activeRule[0]]) == len(self.activeRule[0]):
for vals in self.activeRule[1]:
if usedVals == [vals[self.activeRule[0].index(a)] for a in usedAttrs]:
values = list(self.attributeValuesDict.get(self.data.domain.classVar.name, [])) or getVariableValuesSorted(self.data.domain.classVar)
counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
d = 2
r = OWCanvasRectangle(self.canvas, x0-d, y0-d, x1-x0+2*d+1, y1-y0+2*d+1, z = 50)
r.setPen(QPen(self.colorPalette[counts.index(max(counts))], 2, Qt.DashLine))
aprioriDist = None; pearson = None; expected = None
outerRect = OWCanvasRectangle(self.canvas, x0, y0, x1-x0, y1-y0, z = 30)
if not self.conditionalDict[attrVals]: return
# we have to remember which conditions were new in this update so that when we right click we can only remove the last added selections
if self.selectionRectangle is not None and self.selectionRectangle.collidesWithItem(outerRect) and tuple(usedVals) not in self.selectionConditions:
self.recentlyAdded = getattr(self, "recentlyAdded", []) + [tuple(usedVals)]
self.selectionConditions = self.selectionConditions + [tuple(usedVals)]
# show rectangle selected or not
if tuple(usedVals) in self.selectionConditions:
outerRect.setPen(QPen(Qt.black, 3, Qt.DotLine))
if self.interiorColoring == CLASS_DISTRIBUTION and (not self.data.domain.classVar or not self.data.domain.classVar.varType == orange.VarTypes.Discrete):
return
        # draw Pearson residuals
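        # Descriptive note on the computation below: under the independence
        # assumption the expected cell count is E = N * prod_i P(attr_i = v_i),
        # and the standardized (Pearson) residual is r = (O - E) / sqrt(E),
        # where O is the observed count. |r| is binned at 2, 4 and 8 to pick one
        # of four shades; blue is used for positive and red for negative residuals.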
if self.interiorColoring == PEARSON or not self.data.domain.classVar or self.data.domain.classVar.varType != orange.VarTypes.Discrete:
s = sum(self.aprioriDistributions[0])
expected = s * reduce(lambda x, y: x*y, [self.aprioriDistributions[i][usedVals[i]]/float(s) for i in range(len(usedVals))])
actual = self.conditionalDict[attrVals]
pearson = float(actual - expected) / sqrt(expected)
if abs(pearson) < 2: ind = 0
elif abs(pearson) < 4: ind = 1
elif abs(pearson) < 8: ind = 2
else: ind = 3
if pearson > 0: color = self.blueColors[ind]
else: color = self.redColors[ind]
OWCanvasRectangle(self.canvas, x0, y0, x1-x0, y1-y0, color, color, z = -20)
# draw class distribution - actual and apriori
# we do have a discrete class
else:
clsValues = list(self.attributeValuesDict.get(self.data.domain.classVar.name, [])) or getVariableValuesSorted(self.data.domain.classVar)
aprioriDist = orange.Distribution(self.data.domain.classVar.name, self.data)
total = 0
for i in range(len(clsValues)):
val = self.conditionalDict[attrVals + "-" + clsValues[i]]
if val == 0:
continue
if self.horizontalDistribution:
if i == len(clsValues)-1: v = x1-x0 - total
else: v = ((x1-x0)* val)/self.conditionalDict[attrVals]
OWCanvasRectangle(self.canvas, x0+total, y0, v, y1-y0, self.colorPalette[i], self.colorPalette[i], z = -20)
else:
if i == len(clsValues)-1: v = y1-y0 - total
else: v = ((y1-y0)* val)/self.conditionalDict[attrVals]
OWCanvasRectangle(self.canvas, x0, y0+total, x1-x0, v, self.colorPalette[i], self.colorPalette[i], z = -20)
total += v
# show apriori boxes and lines
if (self.showAprioriDistributionLines or self.useBoxes) and abs(x1 - x0) > self.boxSize and abs(y1 - y0) > self.boxSize:
apriori = [aprioriDist[val]/float(len(self.data)) for val in clsValues]
if self.showAprioriDistributionBoxes or self.data.domain.classVar.name in usedAttrs: # we want to show expected class distribution under independence hypothesis
boxCounts = apriori
else:
contingencies = self.optimizationDlg.getContingencys(usedAttrs)
boxCounts = []
for clsVal in clsValues:
# compute: P(c_i) * prod (P(c_i|attr_k) / P(c_i)) for each class value
Pci = aprioriDist[clsVal]/float(sum(aprioriDist.values()))
tempVal = Pci
if Pci > 0:
#tempVal = 1.0 / Pci
for i in range(len(usedAttrs)):
tempVal *= contingencies[usedAttrs[i]][usedVals[i]][clsVal] / Pci
boxCounts.append(tempVal)
#boxCounts.append(aprioriDist[val]/float(sum(aprioriDist.values())) * reduce(operator.mul, [contingencies[usedAttrs[i]][usedVals[i]][clsVal]/float(sum(contingencies[usedAttrs[i]][usedVals[i]].values())) for i in range(len(usedAttrs))]))
total1 = 0; total2 = 0
if self.useBoxes:
if self.horizontalDistribution: OWCanvasLine(self.canvas, x0, y0+self.boxSize, x1, y0+self.boxSize, z = 30)
else: OWCanvasLine(self.canvas, x0+self.boxSize, y0, x0+self.boxSize, y1, z = 30)
for i in range(len(clsValues)):
val1 = apriori[i]
if self.showAprioriDistributionBoxes: val2 = apriori[i]
else: val2 = boxCounts[i]/float(sum(boxCounts))
if self.horizontalDistribution:
if i == len(clsValues)-1:
v1 = x1-x0 - total1
v2 = x1-x0 - total2
else:
v1 = (x1-x0)* val1
v2 = (x1-x0)* val2
x,y,w,h, xL1, yL1, xL2, yL2 = x0+total2, y0, v2, self.boxSize, x0+total1+v1, y0, x0+total1+v1, y1
else:
if i== len(clsValues)-1:
v1 = y1-y0 - total1
v2 = y1-y0 - total2
else:
v1 = (y1-y0)* val1
v2 = (y1-y0)* val2
x,y,w,h, xL1, yL1, xL2, yL2 = x0, y0+total2, self.boxSize, v2, x0, y0+total1+v1, x1, y0+total1+v1
if self.useBoxes:
OWCanvasRectangle(self.canvas, x, y, w, h, self.colorPalette[i], self.colorPalette[i], z = 20)
if i < len(clsValues)-1 and self.showAprioriDistributionLines:
OWCanvasLine(self.canvas, xL1, yL1, xL2, yL2, z = 10)
total1 += v1
total2 += v2
# show subset distribution
if self.conditionalSubsetDict:
# show a rect around the box if subset examples belong to this box
if self.conditionalSubsetDict[attrVals]:
#counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
#if sum(counts) == 1: color = self.colorPalette[counts.index(1)]
#else: color = Qt.black
#OWCanvasRectangle(self.canvas, x0-2, y0-2, x1-x0+5, y1-y0+5, color, QColor(Qt.white), penWidth = 2, z=-50, penStyle = Qt.DashLine)
counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
if sum(counts) == 1:
OWCanvasRectangle(self.canvas, x0-2, y0-2, x1-x0+5, y1-y0+5, self.colorPalette[counts.index(1)], QColor(Qt.white), penWidth = 2, z=-50, penStyle = Qt.DashLine)
if self.showSubsetDataBoxes: # do we want to show exact distribution in the right edge of each cell
if self.horizontalDistribution: OWCanvasLine(self.canvas, x0, y1-self.boxSize, x1, y1-self.boxSize, z = 30)
else: OWCanvasLine(self.canvas, x1-self.boxSize, y0, x1-self.boxSize, y1, z = 30)
total = 0
for i in range(len(aprioriDist)):
val = self.conditionalSubsetDict[attrVals + "-" + clsValues[i]]
if not self.conditionalSubsetDict[attrVals] or val == 0: continue
if self.horizontalDistribution:
if i == len(aprioriDist)-1: v = x1-x0 - total
else: v = ((x1-x0)* val)/float(self.conditionalSubsetDict[attrVals])
OWCanvasRectangle(self.canvas, x0+total, y1-self.boxSize, v, self.boxSize, self.colorPalette[i], self.colorPalette[i], z = 15)
else:
if i == len(aprioriDist)-1: v = y1-y0 - total
else: v = ((y1-y0)* val)/float(self.conditionalSubsetDict[attrVals])
OWCanvasRectangle(self.canvas, x1-self.boxSize, y0+total, self.boxSize, v, self.colorPalette[i], self.colorPalette[i], z = 15)
total += v
tooltipText = "Examples in this area have:<br>" + condition
if aprioriDist:
clsValues = list(self.attributeValuesDict.get(self.data.domain.classVar.name, [])) or getVariableValuesSorted(self.data.domain.classVar)
actual = [self.conditionalDict[attrVals + "-" + clsValues[i]] for i in range(len(aprioriDist))]
if sum(actual) > 0:
apriori = [aprioriDist[key] for key in clsValues]
aprioriText = ""; actualText = ""
text = ""
for i in range(len(clsValues)):
text += 4*" " + "<b>%s</b>: %d / %.1f%% (Expected %.1f / %.1f%%)<br>" % (clsValues[i], actual[i], 100.0*actual[i]/float(sum(actual)), (apriori[i]*sum(actual))/float(sum(apriori)), 100.0*apriori[i]/float(sum(apriori)))
tooltipText += "Number of examples: " + str(int(sum(actual))) + "<br> Class distribution:<br>" + text[:-4]
elif pearson and expected:
tooltipText += "<hr>Expected number of examples: %.1f<br>Actual number of examples: %d<br>Standardized (Pearson) residual: %.1f" % (expected, self.conditionalDict[attrVals], pearson)
outerRect.setToolTip(tooltipText)
# draw the class legend below the square
def DrawLegend(self, data, (x0, x1), (y0, y1)):
if self.interiorColoring == CLASS_DISTRIBUTION and (not data.domain.classVar or data.domain.classVar.varType == orange.VarTypes.Continuous):
return
if self.interiorColoring == PEARSON:
names = ["<-8", "-8:-4", "-4:-2", "-2:2", "2:4", "4:8", ">8", "Residuals:"]
colors = self.redColors[::-1] + self.blueColors[1:]
else:
names = (list(self.attributeValuesDict.get(data.domain.classVar.name, [])) or getVariableValuesSorted(data.domain.classVar)) + [data.domain.classVar.name+":"]
colors = [self.colorPalette[i] for i in range(len(data.domain.classVar.values))]
self.names = [OWCanvasText(self.canvas, name, alignment = Qt.AlignVCenter) for name in names]
totalWidth = sum([text.boundingRect().width() for text in self.names])
# compute the x position of the center of the legend
y = y1 + self.attributeNameOffset + 20
distance = 30
startX = (x0+x1)/2 - (totalWidth + (len(names))*distance)/2
self.names[-1].setPos(startX+15, y); self.names[-1].show()
xOffset = self.names[-1].boundingRect().width() + distance
size = 8 # 8 + 8*(self.interiorColoring == PEARSON)
for i in range(len(names)-1):
if self.interiorColoring == PEARSON: edgeColor = Qt.black
else: edgeColor = colors[i]
OWCanvasRectangle(self.canvas, startX + xOffset, y-size/2, size, size, edgeColor, colors[i])
self.names[i].setPos(startX + xOffset + 10, y)
xOffset += distance + self.names[i].boundingRect().width()
def saveToFileCanvas(self):
sizeDlg = OWDlgs.OWChooseImageSizeDlg(self.canvas, parent=self)
sizeDlg.exec_()
def setColors(self):
dlg = self.createColorDialog()
if dlg.exec_():
self.colorSettings = dlg.getColorSchemas()
self.selectedSchemaIndex = dlg.selectedSchemaIndex
self.colorPalette = dlg.getDiscretePalette("discPalette")
if self.data and self.data.domain.classVar and self.data.domain.classVar.varType == orange.VarTypes.Discrete:
self.colorPalette.setNumberOfColors(len(self.data.domain.classVar.values))
self.updateGraph()
def createColorDialog(self):
c = OWColorPalette.ColorPaletteDlg(self, "Color Palette")
c.createDiscretePalette("discPalette", "Discrete Palette", OWColorPalette.defaultRGBColors) #defaultColorBrewerPalette)
c.setColorSchemas(self.colorSettings, self.selectedSchemaIndex)
return c
# ########################################
# cell/example selection
def sendSelectedData(self):
# send the selected examples
self.send("Selected Data", self.getSelectedExamples())
    # add a new selection rectangle; update the graph and see which mosaics it intersects, then add those mosaics to the recentlyAdded list
def addSelection(self, rect):
self.selectionRectangle = rect
self.updateGraph(drillUpdateSelection = 0)
self.sendSelectedData()
if getattr(self, "recentlyAdded", []):
self.selectionConditionsHistorically = self.selectionConditionsHistorically + [self.recentlyAdded]
self.recentlyAdded = []
self.optimizationDlg.mtUpdateState() # we have already called this in self.updateGraph() call
self.selectionRectangle = None
# remove the mosaics that were added with the last selection rectangle
def removeLastSelection(self):
if self.selectionConditionsHistorically:
vals = self.selectionConditionsHistorically.pop()
for val in vals:
if tuple(val) in self.selectionConditions:
self.selectionConditions.remove(tuple(val))
self.updateGraph()
## self.optimizationDlg.mtUpdateState() # we have already called this in self.updateGraph() call
self.sendSelectedData()
def removeAllSelections(self):
self.selectionConditions = []
self.selectionConditionsHistorically = []
## self.optimizationDlg.mtUpdateState() # removeAllSelections is always called before updateGraph() - where mtUpdateState is called
self.sendSelectedData()
# return examples in currently selected boxes as example table or array of 0/1 values
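    # Descriptive note: for every selected value combination a 0/1 selection
    # vector is computed with orange.Preprocessor_take and the vectors are
    # summed; rows with a positive sum are selected (or deselected when
    # negate=1), and the result is returned either as an ExampleTable
    # (selectref) or as the raw 0/1 list.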
def getSelectedExamples(self, asExampleTable = 1, negate = 0, selectionConditions = None, data = None, attrs = None):
if attrs is None: attrs = self.getShownAttributeList()
if data is None: data = self.data
if selectionConditions is None: selectionConditions = self.selectionConditions
if attrs == [] or not data:
return None
pp = orange.Preprocessor_take()
sumIndices = numpy.zeros(len(data))
for val in selectionConditions:
for i, attr in enumerate(attrs):
pp.values[data.domain[attr]] = val[i]
indices = numpy.array(pp.selectionVector(data))
sumIndices += indices
selectedIndices = list(numpy.where(sumIndices > 0, 1 - negate, 0 + negate))
if asExampleTable:
return data.selectref(selectedIndices)
else:
return selectedIndices
def saveSettings(self):
OWWidget.saveSettings(self)
self.optimizationDlg.saveSettings()
class SortAttributeValuesDlg(OWBaseWidget):
def __init__(self, parentWidget = None, attr = "", valueList = []):
OWBaseWidget.__init__(self, None, None, "Sort Attribute Values", modal = TRUE)
self.setLayout(QVBoxLayout())
#self.space = QWidget(self)
#self.layout = QVBoxLayout(self, 4)
#self.layout.addWidget(self.space)
box1 = OWGUI.widgetBox(self, "Select Value Order for Attribute \"" + attr + '"', orientation = "horizontal")
self.attributeList = OWGUI.listBox(box1, self, selectionMode = QListWidget.ExtendedSelection, enableDragDrop = 1)
self.attributeList.addItems(valueList)
vbox = OWGUI.widgetBox(box1, "", orientation = "vertical")
self.buttonUPAttr = OWGUI.button(vbox, self, "", callback = self.moveAttrUP, tooltip="Move selected attribute values up")
self.buttonDOWNAttr = OWGUI.button(vbox, self, "", callback = self.moveAttrDOWN, tooltip="Move selected attribute values down")
self.buttonUPAttr.setIcon(QIcon(os.path.join(self.widgetDir, "icons/Dlg_up3.png")))
self.buttonUPAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed , QSizePolicy.Expanding))
self.buttonUPAttr.setFixedWidth(40)
self.buttonDOWNAttr.setIcon(QIcon(os.path.join(self.widgetDir, "icons/Dlg_down3.png")))
self.buttonDOWNAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed , QSizePolicy.Expanding))
self.buttonDOWNAttr.setFixedWidth(40)
box2 = OWGUI.widgetBox(self, 1, orientation = "horizontal")
self.okButton = OWGUI.button(box2, self, "OK", callback = self.accept)
self.cancelButton = OWGUI.button(box2, self, "Cancel", callback = self.reject)
self.resize(300, 300)
# move selected attribute values
def moveAttrUP(self):
for i in range(1, self.attributeList.count()):
if self.attributeList.item(i).isSelected():
self.attributeList.insertItem(i-1, self.attributeList.item(i).text())
self.attributeList.takeItem(i+1)
self.attributeList.item(i-1).setSelected(TRUE)
def moveAttrDOWN(self):
for i in range(self.attributeList.count()-2,-1,-1):
if self.attributeList.item(i).isSelected():
self.attributeList.insertItem(i+2, self.attributeList.item(i).text())
self.attributeList.item(i+2).setSelected(TRUE)
self.attributeList.takeItem(i)
#test widget appearance
if __name__=="__main__":
a=QApplication(sys.argv)
ow = OWMosaicDisplay()
ow.show()
# data = orange.ExampleTable(r"e:\Development\Orange Datasets\UCI\zoo.tab")
data = orange.ExampleTable("../../doc/datasets/zoo.tab")
ow.setData(data)
ow.handleNewSignals()
# for d in ["zoo.tab", "iris.tab", "zoo.tab"]:
# data = orange.ExampleTable(r"e:\Development\Orange Datasets\UCI\\" + d)
# ow.setData(data)
# ow.handleNewSignals()
a.exec_()
| gpl-3.0 |
atavory/ibex | ibex/__init__.py | 1 | 1091 | """
Pandas adapters for sklearn-type estimators
"""
from ._base import *
from ._adapter import *
from ._function_transformer import *
import sklearn
__all__ = []
__version__ = '0.1.0'
__all__ += ['__version__']
__all__ += ['FrameMixin']
__all__ += ['frame', 'frame_ex']
__all__ += ['trans']
__all__ += ['sklearn']
def trans(func=None, in_cols=None, out_cols=None, pass_y=False, kw_args=None):
"""
Arguments:
func: One of:
* ``None``
* a callable
* a step
in_cols: One of:
* ``None``
* a string
* a list of strings
        out_cols: Output column name(s); takes the same forms as ``in_cols``.
        pass_y: Boolean indicating whether to pass the ``y`` argument to ``func``.
        kw_args: Dict of additional keyword arguments passed to ``func``, or ``None``.
Returns:
An :py:class:`sklearn.preprocessing.FunctionTransformer` object.
See the `documentation <https://atavory.github.io/ibex/function_transformer.html>`_ for examples.
"""
from ibex.sklearn import preprocessing
return preprocessing.FunctionTransformer(func, in_cols, out_cols, pass_y, kw_args)
__all__ += ['trans']
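# A minimal usage sketch of ``trans`` (illustrative only; the column names
# 'a'/'sqrt_a' and the use of numpy.sqrt are assumptions, not part of this
# module):
#
#     import numpy as np
#     import pandas as pd
#     from ibex import trans
#
#     df = pd.DataFrame({'a': [1., 4., 9.]})
#     step = trans(np.sqrt, in_cols='a', out_cols='sqrt_a')
#     step.fit_transform(df)  # expected: a DataFrame holding the 'sqrt_a' column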
| bsd-3-clause |
woobe/h2o | bench/BMscripts/gbmBench.py | 11 | 11141 | #GBM bench
import os, sys, time, csv, string
sys.path.append('../py/')
sys.path.extend(['.','..'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_rf, h2o_jobs
csv_header = ('h2o_build','nMachines','nJVMs','Xmx/JVM','dataset','nTrainRows','nTestRows','nCols','trainParseWallTime','nTrees','minRows','maxDepth','learnRate','classification','gbmBuildTime','Error')
files = {'Airlines' : {'train': ('AirlinesTrain1x', 'AirlinesTrain10x', 'AirlinesTrain100x'), 'test' : 'AirlinesTest'},
'AllBedrooms': {'train': ('AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x'), 'test' : 'AllBedroomsTest'},
'Covtype' : {'train': ('CovTypeTrain1x', 'CovTypeTrain10x', 'CovTypeTrain100x'), 'test' : 'CovTypeTest'},
}
build = ""
debug = False
json = ""
def doGBM(f, folderPath, ignored_cols, classification, testFilehex, ntrees, depth, minrows, nbins, learnRate, response, row):
debug = False
bench = "bench"
if debug:
print "Doing GBM DEBUG"
bench = "bench/debug"
#date = '-'.join([str(x) for x in list(time.localtime())][0:3])
overallWallStart = time.time()
pre = ""
if debug: pre = 'DEBUG'
gbmbenchcsv = 'benchmarks/'+build+'/'+pre+'gbmbench.csv'
if not os.path.exists(gbmbenchcsv):
output = open(gbmbenchcsv,'w')
output.write(','.join(csv_header)+'\n')
else:
output = open(gbmbenchcsv,'a')
csvWrt = csv.DictWriter(output, fieldnames=csv_header, restval=None,
dialect='excel', extrasaction='ignore',delimiter=',')
try:
java_heap_GB = h2o.nodes[0].java_heap_GB
importFolderPath = bench + "/" + folderPath
if (f in ['AirlinesTrain1x','AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x','CovTypeTrain1x', 'CovTypeTrain10x', 'CovTypeTrain100x']):
csvPathname = importFolderPath + "/" + f + '.csv'
else:
csvPathname = importFolderPath + "/" + f + "/*linked*"
hex_key = f + '.hex'
hK = folderPath + "Header.csv"
headerPathname = importFolderPath + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
trainParseWallStart = time.time()
h2o.beta_features = False #ensure this is false!
if f in (['AirlinesTrain10x', 'AirlinesTrain100x']): h2o.beta_features = False #regex parsing acting weird when not using browser, use VA -> FVEC converter
parseResult = h2i.import_parse(bucket = 'home-0xdiag-datasets',
path = csvPathname,
schema = 'local',
hex_key = hex_key,
header = 1,
header_from_file = headerKey,
separator = 44,
timeoutSecs = 16000,
retryDelaySecs = 5,
pollTimeoutSecs = 16000,
noPoll = True,
doSummary = False
)
h2o_jobs.pollWaitJobs(timeoutSecs=16000, pollTimeoutSecs=16000, retryDelaySecs=5)
parseWallTime = time.time() - trainParseWallStart
print "Parsing training file took ", parseWallTime ," seconds."
h2o.beta_features = False #make sure false for the inspect as well!
inspect_train = h2o.nodes[0].inspect(hex_key, timeoutSecs=16000)
inspect_test = h2o.nodes[0].inspect(testFilehex, timeoutSecs=16000)
h2o.beta_features = True #ok, can be true again
nMachines = 1 if len(h2o_hosts.hosts) is 0 else len(h2o_hosts.hosts)
row.update( {'h2o_build' : build,
'nMachines' : nMachines,
'nJVMs' : len(h2o.nodes),
'Xmx/JVM' : java_heap_GB,
'dataset' : f,
'nTrainRows' : inspect_train['num_rows'],
'nTestRows' : inspect_test['num_rows'],
'nCols' : inspect_train['num_cols'],
'trainParseWallTime' : parseWallTime,
'nTrees' : ntrees,
'minRows' : minrows,
'maxDepth' : depth,
'learnRate' : learnRate,
'classification' : classification,
})
params = {'destination_key' : 'GBM('+f+')',
'response' : response,
'ignored_cols_by_name' : ignored_cols,
'classification' : classification,
'validation' : testFilehex,
'ntrees' : ntrees,
'max_depth' : depth,
'min_rows' : minrows,
'nbins' : nbins,
'learn_rate' : learnRate,
}
parseResult = {'destination_key' : hex_key}
kwargs = params.copy()
gbmStart = time.time()
#TODO(spencer): Uses jobs to poll for gbm completion
gbm = h2o_cmd.runGBM(parseResult = parseResult, noPoll=True, timeoutSecs=4800, **kwargs)
h2o_jobs.pollWaitJobs(timeoutSecs=16000, pollTimeoutSecs=120, retryDelaySecs=5)
gbmTime = time.time() - gbmStart
cmd = 'bash startloggers.sh ' + json + ' stop_'
os.system(cmd)
row.update( {'gbmBuildTime' : gbmTime,
})
gbmTrainView = h2o_cmd.runGBMView(model_key='GBM('+f+')')
if classification:
cm = gbmTrainView['gbm_model']['cm']
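            # cm is a 2x2 confusion matrix; the off-diagonal entries are the
            # misclassified examples, so err below is the plain misclassification rate.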
err = 1.0*(cm[0][1] + cm[1][0]) / (cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1])
else:
err = gbmTrainView['gbm_model']['errs'][-1]
row.update({'Error' : err})
csvWrt.writerow(row)
finally:
output.close()
if __name__ == '__main__':
dat = sys.argv.pop(-1)
debug = sys.argv.pop(-1)
build = sys.argv.pop(-1)
json = sys.argv[-1].split('/')[-1]
h2o.parse_our_args()
h2o_hosts.build_cloud_with_hosts(enable_benchmark_log=False)
fp = 'Airlines' if 'Air' in dat else 'AllBedrooms'
bench = "bench"
h2o.beta_features = True
debug = False
if debug:
bench = "bench/debug"
if dat == 'Air1x' : fs = files['Airlines']['train'][0]
if dat == 'Air10x' : fs = files['Airlines']['train'][1]
if dat == 'Air100x' : fs = files['Airlines']['train'][2]
if dat == 'AllB1x' : fs = files['AllBedrooms']['train'][0]
if dat == 'AllB10x' : fs = files['AllBedrooms']['train'][1]
if dat == 'AllB100x' : fs = files['AllBedrooms']['train'][2]
if fp == "Airlines":
#AIRLINES
airlinesTestParseStart = time.time()
hK = "AirlinesHeader.csv"
headerPathname = bench+"/Airlines" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/Airlines/AirlinesTest.csv', schema='local', hex_key="atest.hex", header=1, header_from_file=headerKey, separator=44, noPoll=True,doSummary=False)
h2o_jobs.pollWaitJobs(timeoutSecs=16000, pollTimeoutSecs=16000, retryDelaySecs=5)
elapsedAirlinesTestParse = time.time() - airlinesTestParseStart
row = {'testParseWallTime' : elapsedAirlinesTestParse}
response = 'IsDepDelayed'
ignored = None
doGBM(fs, fp,
ignored_cols = ignored,
classification = 1,
testFilehex = 'atest.hex',
ntrees = 100,
depth = 5,
minrows = 10,
nbins = 100,
learnRate = 0.01,
response = response,
row = row
)
if fp == "AllBedrooms":
#ALLBEDROOMS
allBedroomsTestParseStart = time.time()
hK = "AllBedroomsHeader.csv"
headerPathname = bench+"/AllBedrooms" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/AllBedrooms/AllBedroomsTest.csv', schema='local', hex_key="allBTest.hex", header=1, header_from_file=headerKey, separator=44,noPoll=True,doSummary=False)
h2o_jobs.pollWaitJobs(timeoutSecs=16000, pollTimeoutSecs=16000, retryDelaySecs=5)
elapsedAllBedroomsTestParse = time.time() - allBedroomsTestParseStart
row = {'testParseWallTime' : elapsedAllBedroomsTestParse}
response = 'medrent'
ignored = None
doGBM(fs, fp,
ignored_cols = ignored,
classification = 0,
testFilehex = "allBTest.hex",
ntrees = 100,
depth = 5,
minrows = 10,
nbins = 100,
learnRate = 0.01,
response = response,
row = row
)
#COVTYPE
#covTypeTestParseStart = time.time()
#hK = "CovTypeHeader.csv"
#headerPathname = bench+"/CovType" + "/" + hK
#h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
#headerKey = h2i.find_key(hK)
#testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path=bench+'/CovType/CovTypeTest.csv', schema='local', hex_key="covTtest.hex", header=1, header_from_file=headerKey, separator=44, noPoll=True,doSummary=False)
#h2o_jobs.pollWaitJobs(timeoutSecs=16000, pollTimeoutSecs=16000, retryDelaySecs=5)
#elapsedCovTypeTestParse = time.time() - covTypeTestParseStart
#row = {'testParseWallTime' : elapsedCovTypeTestParse}
#response = 'C55'
#ignored = None
#doGBM(files['Covtype'], folderPath='CovType',
# ignored_cols = ignored,
# classification = 1,
# testFilehex = testFile['destination_key'],
# ntrees = 100,
# depth = 5,
# minrows = 10,
# nbins = 100,
# learnRate = 0.01,
# response = response,
# row = row
# )
h2o.tear_down_cloud()
| apache-2.0 |
Akshay0724/scikit-learn | sklearn/model_selection/tests/test_validation.py | 17 | 42257 | """Test the validation module"""
from __future__ import division
import sys
import warnings
import tempfile
import os
from time import sleep
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test score improves (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist())
assert_raises(ValueError, cross_val_score, clf, X, y2, scoring="sklearn")
    # test with 3d X: works when allow_nd=True, raises ValueError otherwise
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cross_val_score, clf, X_3d, y2)
def test_cross_val_score_predict_groups():
# Check if ValueError (when groups is None) propagates to cross_val_score
# and cross_val_predict
# And also check if groups is correctly passed to the cv object
X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
clf = SVC(kernel="linear")
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_score, estimator=clf, X=X, y=y, cv=cv)
assert_raise_message(ValueError,
"The groups parameter should not be None",
cross_val_predict, estimator=clf, X=X, y=y, cv=cv)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
        # 3-fold cross-validation is used, so we need at least 3 samples per class
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
kfold = KFold(5)
scores_indices = cross_val_score(svm, X, y, cv=kfold)
kfold = KFold(5)
cv_masks = []
for train, test in kfold.split(X, y):
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_linear)
# test with callable
svm = SVC(kernel=lambda x, y: np.dot(x, y.T))
score_callable = cross_val_score(svm, X, y)
assert_array_almost_equal(score_precomputed, score_callable)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cross_val_score, BrokenEstimator(), X)
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced, so f1_score should be equal to the
    # zero/one score)
f1_scores = cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
neg_mse_scores = cross_val_score(reg, X, y, cv=5,
scoring="neg_mean_squared_error")
expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_group, _, pvalue_group = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = StratifiedKFold(2)
score_group, _, pvalue_group = permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", groups=np.ones(y.size), random_state=0)
assert_true(score_group == score)
assert_true(pvalue_group == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) /
y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
permutation_test_score(p, X, y, cv=5)
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cross_val_score(p, X, y, cv=5)
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cross_val_score(clf, X, y, scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = KFold()
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = LeaveOneOut()
preds = cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
class BadCV():
def split(self, X, y=None, groups=None):
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cross_val_predict, est, X, y, cv=BadCV())
def test_cross_val_predict_input_types():
iris = load_iris()
X, y = iris.data, iris.target
X_sparse = coo_matrix(X)
multioutput_y = np.column_stack([y, y[::-1]])
clf = Ridge(fit_intercept=False, random_state=0)
# 3 fold cv is used --> at least 3 samples per class
# Smoke test
predictions = cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_equal(predictions.shape, (150, 2))
predictions = cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (150,))
# test with multioutput y
predictions = cross_val_predict(clf, X_sparse, multioutput_y)
assert_array_equal(predictions.shape, (150, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (150,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y2)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cross_val_predict(clf, X_df, y_ser)
def test_cross_val_score_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_learning_curve():
n_samples = 30
n_splits = 3
X, y = make_classification(n_samples=n_samples, n_features=1,
n_informative=1, n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(n_samples * ((n_splits - 1) / n_splits))
for shuffle_train in [False, True]:
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=KFold(n_splits=n_splits),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
# Test a custom cv splitter that can iterate only once
with warnings.catch_warnings(record=True) as w:
train_sizes2, train_scores2, test_scores2 = learning_curve(
estimator, X, y,
cv=OneTimeSplitter(n_splits=n_splits, n_samples=n_samples),
train_sizes=np.linspace(0.1, 1.0, 10),
shuffle=shuffle_train)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores2, train_scores)
assert_array_almost_equal(test_scores2, test_scores)
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
for shuffle_train in [False, True]:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10), shuffle=shuffle_train)
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n_splits=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_with_shuffle():
# Following test case was designed this way to verify the code
# changes made in pull request: #7506.
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [11, 12], [13, 14], [15, 16],
[17, 18], [19, 20], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18]])
y = np.array([1, 1, 1, 2, 3, 4, 1, 1, 2, 3, 4, 1, 2, 3, 4])
groups = np.array([1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 4, 4, 4, 4])
# Splits on these groups fail without shuffle as the first iteration
# of the learning curve doesn't contain label 4 in the training set.
estimator = PassiveAggressiveClassifier(shuffle=False)
cv = GroupKFold(n_splits=2)
train_sizes_batch, train_scores_batch, test_scores_batch = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2)
assert_array_almost_equal(train_scores_batch.mean(axis=1),
np.array([0.75, 0.3, 0.36111111]))
assert_array_almost_equal(test_scores_batch.mean(axis=1),
np.array([0.36111111, 0.25, 0.25]))
assert_raises(ValueError, learning_curve, estimator, X, y, cv=cv, n_jobs=1,
train_sizes=np.linspace(0.3, 1.0, 3), groups=groups)
train_sizes_inc, train_scores_inc, test_scores_inc = learning_curve(
estimator, X, y, cv=cv, n_jobs=1, train_sizes=np.linspace(0.3, 1.0, 3),
groups=groups, shuffle=True, random_state=2,
exploit_incremental_learning=True)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
def test_validation_curve_cv_splits_consistency():
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=100, random_state=0)
scores1 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
# The OneTimeSplitter is a non-re-entrant cv splitter. Unless `split` is
# called anew for each parameter setting, the following should produce
# identical results for param setting 1 and param setting 2, as both have
# the same C value.
assert_array_almost_equal(*np.vsplit(np.hstack(scores1)[(0, 2, 1, 3), :],
2))
scores2 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits, shuffle=True))
# For scores2, compare the 1st and 2nd parameter's scores
# (Since the C value for the first two param settings is 0.1, they must be
# consistent unless the train/test folds differ between the param settings)
assert_array_almost_equal(*np.vsplit(np.hstack(scores2)[(0, 2, 1, 3), :],
2))
scores3 = validation_curve(SVC(kernel='linear', random_state=0), X, y,
'C', [0.1, 0.1, 0.2, 0.2],
cv=KFold(n_splits=n_splits))
# OneTimeSplitter is basically unshuffled KFold(n_splits=5). Sanity check.
assert_array_almost_equal(np.array(scores3), np.array(scores1))
def test_check_is_permutation():
rng = np.random.RandomState(0)
p = np.arange(100)
rng.shuffle(p)
assert_true(_check_is_permutation(p, 100))
assert_false(_check_is_permutation(np.delete(p, 23), 100))
p[0] = 23
assert_false(_check_is_permutation(p, 100))
# Check if the additional duplicate indices are caught
assert_false(_check_is_permutation(np.hstack((p, 0)), 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cross_val_predict(classif, X, y, cv=10)
preds_sparse = cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
def check_cross_val_predict_with_method(est):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=0)
classes = len(set(y))
kfold = KFold(len(iris.target))
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
predictions = cross_val_predict(est, X, y, method=method)
assert_equal(len(predictions), len(y))
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
# Naive loop (should be same as cross_val_predict):
for train, test in kfold.split(X, y):
est.fit(X[train], y[train])
expected_predictions[test] = func(X[test])
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold)
assert_array_almost_equal(expected_predictions, predictions)
# Test alternative representations of y
predictions_y1 = cross_val_predict(est, X, y + 1, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y1)
predictions_y2 = cross_val_predict(est, X, y - 2, method=method,
cv=kfold)
assert_array_equal(predictions, predictions_y2)
predictions_ystr = cross_val_predict(est, X, y.astype('str'),
method=method, cv=kfold)
assert_array_equal(predictions, predictions_ystr)
def test_cross_val_predict_with_method():
check_cross_val_predict_with_method(LogisticRegression())
def test_gridsearchcv_cross_val_predict_with_method():
est = GridSearchCV(LogisticRegression(random_state=42),
{'C': [0.1, 1]},
cv=2)
check_cross_val_predict_with_method(est)
def get_expected_predictions(X, y, cv, classes, est, method):
expected_predictions = np.zeros([len(y), classes])
func = getattr(est, method)
for train, test in cv.split(X, y):
est.fit(X[train], y[train])
expected_predictions_ = func(X[test])
# To avoid 2 dimensional indexing
exp_pred_test = np.zeros((len(test), classes))
if method == 'decision_function' and len(est.classes_) == 2:
exp_pred_test[:, est.classes_[-1]] = expected_predictions_
else:
exp_pred_test[:, est.classes_] = expected_predictions_
expected_predictions[test] = exp_pred_test
return expected_predictions
def test_cross_val_predict_class_subset():
X = np.arange(8).reshape(4, 2)
y = np.array([0, 0, 1, 2])
classes = 3
kfold3 = KFold(n_splits=3)
kfold4 = KFold(n_splits=4)
le = LabelEncoder()
methods = ['decision_function', 'predict_proba', 'predict_log_proba']
for method in methods:
est = LogisticRegression()
# Test with n_splits=3
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
# Runs a naive loop (should be same as cross_val_predict):
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Test with n_splits=4
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold4)
expected_predictions = get_expected_predictions(X, y, kfold4, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
# Testing unordered labels
y = [1, 1, -4, 6]
predictions = cross_val_predict(est, X, y, method=method,
cv=kfold3)
y = le.fit_transform(y)
expected_predictions = get_expected_predictions(X, y, kfold3, classes,
est, method)
assert_array_almost_equal(expected_predictions, predictions)
def test_score_memmap():
# Ensure a scalar score of memmap type is accepted
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
tf = tempfile.NamedTemporaryFile(mode='wb', delete=False)
tf.write(b'Hello world!!!!!')
tf.close()
scores = np.memmap(tf.name, dtype=np.float64)
score = np.memmap(tf.name, shape=(), mode='r', dtype=np.float64)
try:
cross_val_score(clf, X, y, scoring=lambda est, X, y: score)
# non-scalar should still fail
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=lambda est, X, y: scores)
finally:
# Best effort to release the mmap file handles before deleting the
# backing file under Windows
scores, score = None, None
for _ in range(3):
try:
os.unlink(tf.name)
break
except WindowsError:
sleep(1.)
def test_permutation_test_score_pandas():
# check permutation_test_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
iris = load_iris()
X, y = iris.data, iris.target
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
permutation_test_score(clf, X_df, y_ser)
| bsd-3-clause |
RecipeML/Recipe | recipe/preprocessors/imputer.py | 1 | 1256 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.preprocessing import Imputer
def imputer(args):
"""Uses scikit-learn's Imputer, imputation transformer for completing missing values.
Parameters
----------
strategy : string
The imputation strategy.
If “mean”, then replace missing values using the mean along the axis.
If “median”, then replace missing values using the median along the axis.
If “most_frequent”, then replace missing using the most frequent value along the axis.
"""
strat = args[1]
return Imputer(missing_values='NaN', strategy=strat, axis=0, verbose=0, copy=True) | gpl-3.0 |
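# Minimal usage sketch (illustrative only; the variable names below are assumed and are not
# part of the original RECIPE module): args is a sequence whose second element names the
# imputation strategy.
#   imp = imputer(['Imputer', 'median'])       # -> sklearn Imputer replacing NaN by the median
#   X_clean = imp.fit_transform(X_with_nans)   # X_with_nans: 2-D numeric array containing NaN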
thientu/scikit-learn | sklearn/datasets/tests/test_base.py | 204 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_equal(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 44 | 7031 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
"""Test partial dependence for classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
"""Test partial dependence for multi-class classifier """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
"""Test partial dependence for regressor """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependecy_input():
"""Test input validation of partial dependence. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
"""Test partial dependence plot function. """
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
"""Test partial dependence plot function input checks. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
"""Test partial dependence plot function on multi-class input. """
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
wuxue/altanalyze | AltAnalyze_LOCAL_6888.py | 1 | 537268 | #!/usr/local/bin/python2.6
###AltAnalyze
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
#import pkg_resources
#import distutils
import statistics
import sys, string
import os.path
import unique
import update
import UI
import copy
import export;
reload(export)
import ExpressionBuilder;
reload(ExpressionBuilder)
import ExonAnalyze_module;
reload(ExonAnalyze_module)
import ExonAnnotate_module;
reload(ExonAnnotate_module)
import ResultsExport_module
import GO_Elite
import time
import webbrowser
import random
import traceback
try:
import multiprocessing as mlp
except Exception:
mlp = None
print 'Note: Multiprocessing not supported for this version of Python.'
try:
from scipy import stats
except Exception:
pass ### scipy is not required but is used as a faster implementation of Fisher Exact Test when present
try:
from PIL import Image as PIL_Image
try:
import ImageTk
except Exception:
from PIL import ImageTk
except Exception:
None #print 'Python Imaging Library not installed... using default PNG viewer'
use_Tkinter = 'no'
debug_mode = 'no'
analysis_start_time = time.time()
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
dir_list2 = [] #add in code to prevent folder names from being included
for entry in dir_list:
if entry[-4:] == ".txt" or entry[-4:] == ".csv" or entry[-4:] == ".TXT":
dir_list2.append(entry)
return dir_list2
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def makeUnique(item):
db1 = {};
list1 = [];
k = 0
for i in item:
try:
db1[i] = []
except TypeError:
db1[tuple(i)] = []; k = 1
for i in db1:
if k == 0:
list1.append(i)
else:
list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line, '\n', '')
line = string.replace(line, '\c', '')
data = string.replace(line, '\r', '')
data = string.replace(data, '"', '')
return data
def returnLargeGlobalVars():
### Prints all large global variables retained in memory (taking up space)
all = [var for var in globals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(globals()[var]) > 500:
print var, len(globals()[var])
except Exception:
null = []
def clearObjectsFromMemory(db_to_clear):
db_keys = {}
try:
for key in db_to_clear: db_keys[key] = []
except Exception:
for key in db_to_clear: del key ### if key is a list
for key in db_keys:
try:
del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception:
del key ### For plain lists
def importGeneric(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericFiltered(filename, filter_db):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
key = t[0]
if key in filter_db: key_db[key] = t[1:]
return key_db
def importGenericFilteredDBList(filename, filter_db):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
null = filter_db[t[0]]
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
except Exception:
null = []
return key_db
def importGenericDBList(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1])
except KeyError:
key_db[t[0]] = [t[1]]
return key_db
def importExternalDBList(filename):
fn = filepath(filename);
key_db = {}
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data, '\t')
try:
key_db[t[0]].append(t[1:])
except Exception:
key_db[t[0]] = [t[1:]]
return key_db
def FindDir(dir, term):
dir_list = unique.read_directory(dir)
dir_list2 = []
dir_list.sort()
for i in dir_list:
if term == i: dir_list2.append(i)
if len(dir_list2) == 0:
for i in dir_list:
if term in i: dir_list2.append(i)
dir_list2.sort();
dir_list2.reverse()
if len(dir_list2) > 0:
return dir_list2[0]
else:
return ''
def openFile(file_dir):
if os.name == 'nt':
try:
os.startfile('"' + file_dir + '"')
except Exception:
os.system('open "' + file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + file_dir + '"')
def openCytoscape(parent_dir, application_dir, application_name):
cytoscape_dir = FindDir(parent_dir, application_dir);
cytoscape_dir = filepath(parent_dir + '/' + cytoscape_dir)
app_dir = FindDir(cytoscape_dir, application_name)
app_dir = cytoscape_dir + '/' + app_dir
if 'linux' in sys.platform:
app_dir = app_dir
app_dir2 = cytoscape_dir + '/Cytoscape'
try:
createCytoscapeDesktop(cytoscape_dir)
except Exception:
null = []
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
if 'java' not in dir_list: print 'Java not referenced in "/usr/bin/". If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = cytoscape_dir + '/cytoscape.jar'
main_path = cytoscape_dir + '/cytoscape.CyMain'
plugins_path = cytoscape_dir + '/plugins'
os.system(
'java -Dswing.aatext=true -Xss5M -Xmx512M -jar ' + jar_path + ' ' + main_path + ' -p ' + plugins_path + ' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
try:
openFile(app_dir2); print 'Cytoscape opened:', app_dir2
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir2)
except Exception:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
else:
try:
openFile(app_dir)
except Exception:
os.chmod(app_dir, 0777)
openFile(app_dir)
def createCytoscapeDesktop(cytoscape_dir):
cyto_ds_output = cytoscape_dir + '/Cytoscape.desktop'
data = export.ExportFile(cyto_ds_output)
cytoscape_desktop = cytoscape_dir + '/Cytoscape'; #cytoscape_desktop = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/Cytoscape'
cytoscape_png = cytoscape_dir + '/.install4j/Cytoscape.png'; #cytoscape_png = '/hd3/home/nsalomonis/Cytoscape_v2.6.1/.install4j/Cytoscape.png'
data.write('[Desktop Entry]' + '\n')
data.write('Type=Application' + '\n')
data.write('Name=Cytoscape' + '\n')
data.write('Exec=/bin/sh "' + cytoscape_desktop + '"' + '\n')
data.write('Icon=' + cytoscape_png + '\n')
data.write('Categories=Application;' + '\n')
data.close()
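# For reference, a file written by the helper above has the following form (the install path
# shown is an assumed example; the real values are taken from cytoscape_dir at run time):
#   [Desktop Entry]
#   Type=Application
#   Name=Cytoscape
#   Exec=/bin/sh "/opt/Cytoscape_v2.6.1/Cytoscape"
#   Icon=/opt/Cytoscape_v2.6.1/.install4j/Cytoscape.png
#   Categories=Application;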
########### Parse Input Annotations ###########
def ProbesetCalls(array_type, probeset_class, splice_event, constitutive_call, external_exonid):
include_probeset = 'yes'
if array_type == 'AltMouse':
exonid = splice_event
if filter_probesets_by == 'exon':
if '-' in exonid or '|' in exonid: ###Therefore the probeset represents an exon-exon junction or multi-exon probeset
include_probeset = 'no'
if filter_probesets_by != 'exon':
if '|' in exonid: include_probeset = 'no'
if constitutive_call == 'yes': include_probeset = 'yes'
else:
if avg_all_for_ss == 'yes' and (probeset_class == 'core' or len(external_exonid) > 2): constitutive_call = 'yes'
#if len(splice_event)>2 and constitutive_call == 'yes' and avg_all_for_ss == 'no': constitutive_call = 'no'
if constitutive_call == 'no' and len(splice_event) < 2 and len(external_exonid) < 2: ###otherwise these are interesting probesets to keep
if filter_probesets_by != 'full':
if filter_probesets_by == 'extended':
if probeset_class == 'full': include_probeset = 'no'
elif filter_probesets_by == 'core':
if probeset_class != 'core': include_probeset = 'no'
return include_probeset, constitutive_call
def EvidenceOfAltSplicing(slicing_annot):
splice_annotations = ["ntron", "xon", "strangeSplice", "Prime", "3", "5", "C-term"];
as_call = 0
splice_annotations2 = ["ntron", "assette", "strangeSplice", "Prime", "3", "5"]
for annot in splice_annotations:
if annot in slicing_annot: as_call = 1
if as_call == 1:
if "C-term" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
elif "bleed" in slicing_annot and ("N-" in slicing_annot or "Promoter" in slicing_annot):
as_call = 0
for annot in splice_annotations2:
if annot in slicing_annot: as_call = 1
return as_call
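# Worked illustration (hypothetical annotation strings): a splicing annotation such as
# 'cassette-exon|intron-retention' matches the "assette"/"ntron" substrings above and yields
# as_call = 1, whereas 'alt-C-term|alt-N-term' ends up as 0 because the C-terminal change
# co-occurs with an N-terminal annotation and no splice-specific term remains to restore the call.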
########### Begin Analyses ###########
class SplicingAnnotationData:
def ArrayType(self):
self._array_type = array_type
return self._array_type
def Probeset(self):
return self._probeset
def setProbeset(self, probeset):
self._probeset = probeset
def ExonID(self):
return self._exonid
def setDisplayExonID(self, exonid):
self._exonid = exonid
def GeneID(self):
return self._geneid
def Symbol(self):
symbol = ''
if self.GeneID() in annotate_db:
y = annotate_db[self.GeneID()]
symbol = y.Symbol()
return symbol
def ExternalGeneID(self):
return self._external_gene
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def GeneStructure(self):
return self._block_structure
def SecondaryExonID(self):
return self._block_exon_ids
def setSecondaryExonID(self, ids):
self._block_exon_ids = ids
def setLocationData(self, chromosome, strand, probeset_start, probeset_stop):
self._chromosome = chromosome;
self._strand = strand
self._start = probeset_start;
self._stop = probeset_stop
def LocationSummary(self):
location = self.Chromosome() + ':' + self.ProbeStart() + '-' + self.ProbeStop() + '(' + self.Strand() + ')'
return location
def Chromosome(self):
return self._chromosome
def Strand(self):
return self._strand
def ProbeStart(self):
return self._start
def ProbeStop(self):
return self._stop
def ProbesetClass(self):
###e.g. core, extended, full
return self._probest_class
def ExternalExonIDs(self):
return self._external_exonids
def ExternalExonIDList(self):
external_exonid_list = string.split(self.ExternalExonIDs(), '|')
return external_exonid_list
def Constitutive(self):
return self._constitutive_status
def setTranscriptCluster(self, secondary_geneid):
self._secondary_geneid = secondary_geneid
def setNovelExon(self, novel_exon):
self._novel_exon = novel_exon
def NovelExon(self):
return self._novel_exon
def SecondaryGeneID(self):
return self._secondary_geneid
def setExonRegionID(self, exon_region):
self._exon_region = exon_region
def ExonRegionID(self):
return self._exon_region
def SplicingEvent(self):
splice_event = self._splicing_event
if len(splice_event) != 0:
if splice_event[0] == '|': splice_event = splice_event[1:]
return splice_event
def SplicingCall(self):
return self._splicing_call
def SpliceJunctions(self):
return self._splice_junctions
def Delete(self):
del self
def Report(self):
output = self.ArrayType() + '|' + self.ExonID() + '|' + self.ExternalGeneID()
return output
def __repr__(self):
return self.Report()
class AltMouseData(SplicingAnnotationData):
def __init__(self, affygene, exons, ensembl, block_exon_ids, block_structure, probe_type_call):
self._geneid = affygene;
self._external_gene = ensembl;
self._exonid = exons;
self._secondary_geneid = ensembl
self._probeset_type = probe_type_call;
self._block_structure = block_structure;
self._block_exon_ids = block_exon_ids
self._external_exonids = 'NA';
self._constitutive_status = 'no'
self._splicing_event = ''
self._secondary_geneid = 'NA'
self._exon_region = ''
if self._probeset_type == 'gene':
self._constitutive_status = 'yes'
else:
self._constitutive_status = 'no'
class AffyExonSTData(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, ens_exon_ids, constitutive_call_probeset, exon_region, splicing_event,
splice_junctions, splicing_call):
self._geneid = ensembl_gene_id;
self._external_gene = ensembl_gene_id;
self._exonid = exon_id
self._constitutive_status = constitutive_call_probeset#; self._start = probeset_start; self._stop = probeset_stop
self._external_exonids = ens_exon_ids; #self._secondary_geneid = transcript_cluster_id#; self._chromosome = chromosome; self._strand = strand
self._exon_region = exon_region;
self._splicing_event = splicing_event;
self._splice_junctions = splice_junctions;
self._splicing_call = splicing_call
if self._exonid[0] == 'U':
self._probeset_type = 'UTR'
elif self._exonid[0] == 'E':
self._probeset_type = 'exonic'
elif self._exonid[0] == 'I':
self._probeset_type = 'intronic'
class AffyExonSTDataAbbreviated(SplicingAnnotationData):
def __init__(self, ensembl_gene_id, exon_id, splicing_call):
self._geneid = ensembl_gene_id;
self._exonid = exon_id;
self._splicing_call = splicing_call
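# Illustrative usage (hypothetical values): downstream code stores these objects in exon_db
# keyed by probeset/junction ID and reads them through the accessors defined above, e.g.
#   pd = AffyExonSTDataAbbreviated('ENSG00000139970', 'E10.1', 1)
#   pd.GeneID()        # -> 'ENSG00000139970'
#   pd.ExonID()        # -> 'E10.1'
#   pd.SplicingCall()  # -> 1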
def importSplicingAnnotations(array_type, Species, probeset_type, avg_ss_for_all, root_dir):
global filter_probesets_by;
filter_probesets_by = probeset_type
global species;
species = Species;
global avg_all_for_ss;
avg_all_for_ss = avg_ss_for_all;
global exon_db;
exon_db = {}
global summary_data_db;
summary_data_db = {};
global remove_intronic_junctions;
remove_intronic_junctions = 'no'
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
filtered_arrayids = {};
filter_status = 'no'
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
return exon_db, constitutive_probeset_db
def importSplicingAnnotationDatabase(filename, array_type, filtered_arrayids, filter_status):
begin_time = time.time()
probesets_included_by_new_evidence = 0;
export_exon_regions = 'yes'
if 'fake' in array_type:
array_type = string.replace(array_type, '-fake', ''); original_arraytype = 'RNASeq'
else:
original_arraytype = array_type
if filter_status == 'no':
global gene_transcript_cluster_db; gene_transcript_cluster_db = {}; gene_transcript_cluster_db2 = {}; global last_exon_region_db; last_exon_region_db = {}
else:
new_exon_db = {}
fn = filepath(filename)
last_gene = ' ';
last_exon_region = ''
constitutive_probeset_db = {};
constitutive_gene = {}
count = 0;
x = 0;
constitutive_original = {}
#if filter_status == 'yes': exon_db = {}
if array_type == 'AltMouse':
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
probeset, affygene, exons, transcript_num, transcripts, probe_type_call, ensembl, block_exon_ids, block_structure, comparison_info = string.split(
probeset_data, '\t')
###note: currently exclude comparison_info since not applicable for existing analyses
if x == 0:
x = 1
else:
if exons[-1] == '|': exons = exons[0:-1]
if affygene[-1] == '|': affygene = affygene[0:-1]; constitutive_gene[affygene] = []
if probe_type_call == 'gene':
constitutive_call = 'yes' #looked through the probe annotations and the gene seems to be the most consistent constitutive feature
else:
constitutive_call = 'no'
include_call, constitutive_call = ProbesetCalls(array_type, '', exons, constitutive_call, '')
if include_call == 'yes':
probe_data = AltMouseData(affygene, exons, ensembl, block_exon_ids, block_structure,
probe_type_call) #this used to just have affygene,exon in the values (1/17/05)
exon_db[probeset] = probe_data
if filter_status == 'yes': new_exon_db[probeset] = probe_data
if constitutive_call == 'yes': constitutive_probeset_db[probeset] = affygene
genes_being_analyzed = constitutive_gene
else:
for line in open(fn, 'rU').xreadlines():
probeset_data = cleanUpLine(line) #remove endline
if x == 0:
x = 1
else:
try:
probeset_id, exon_id, ensembl_gene_id, transcript_cluster_id, chromosome, strand, probeset_start, probeset_stop, affy_class, constitutive_call_probeset, external_exonid, ens_const_exons, exon_region, exon_region_start, exon_region_stop, splicing_event, splice_junctions = string.split(
probeset_data, '\t')
except Exception:
print probeset_data;force_error
if affy_class == 'free': affy_class = 'full' ### Don't know what the difference is
include_call, constitutive_call = ProbesetCalls(array_type, affy_class, splicing_event,
constitutive_call_probeset, external_exonid)
#if 'ENSG00000163904:E11.5' in probeset_id: print probeset_data
#print array_type,affy_class,splicing_event,constitutive_call_probeset,external_exonid,constitutive_call,include_call;kill
if array_type == 'junction' and '.' not in exon_id: exon_id = string.replace(exon_id, '-', '.'); exon_region = string.replace(exon_region, '-', '.')
if ensembl_gene_id != last_gene:
new_gene = 'yes'
else:
new_gene = 'no'
if filter_status == 'no' and new_gene == 'yes':
if '.' in exon_id: ### Exclude junctions
if '-' not in last_exon_region and 'E' in last_exon_region: last_exon_region_db[last_gene] = last_exon_region
else:
last_exon_region_db[last_gene] = last_exon_region
last_gene = ensembl_gene_id
if len(exon_region) > 1: last_exon_region = exon_region ### some probeset not linked to an exon region
###Record the transcript clusters assoicated with each gene to annotate the results later on
if constitutive_call_probeset != constitutive_call: probesets_included_by_new_evidence += 1#; print probeset_id,[splicing_event],[constitutive_call_probeset];kill
proceed = 'no';
as_call = 0
if array_type == 'RNASeq' or array_type == 'junction': include_call = 'yes' ### Constitutive expression is not needed
if remove_intronic_junctions == 'yes':
if 'E' not in exon_id: include_call = 'no' ### Remove junctions that only have splice-sites within an intron or UTR
if include_call == 'yes' or constitutive_call == 'yes':
#if proceed == 'yes':
as_call = EvidenceOfAltSplicing(splicing_event)
if filter_status == 'no':
probe_data = AffyExonSTDataAbbreviated(ensembl_gene_id, exon_id, as_call)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
try:
if export_exon_regions == 'yes':
probe_data.setExonRegionID(exon_region)
except Exception:
null = []
else:
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, constitutive_call,
exon_region, splicing_event, splice_junctions, as_call)
probe_data.setLocationData(chromosome, strand, probeset_start, probeset_stop)
if array_type != 'RNASeq':
probe_data.setTranscriptCluster(transcript_cluster_id)
else:
probe_data.setNovelExon(affy_class)
if filter_status == 'yes':
try: ### saves memory
null = filtered_arrayids[probeset_id]
new_exon_db[probeset_id] = probe_data
except KeyError:
null = []
else:
exon_db[probeset_id] = probe_data
if constitutive_call == 'yes' and filter_status == 'no': ###only perform function when initially running
constitutive_probeset_db[probeset_id] = ensembl_gene_id
try:
constitutive_gene[ensembl_gene_id].append(probeset_id)
except Exception:
constitutive_gene[ensembl_gene_id] = [probeset_id]
###Only consider transcript clusters that make up the constitutive portion of the gene or that are alternatively regulated
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db[ensembl_gene_id].append(transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db[ensembl_gene_id] = [transcript_cluster_id]
if constitutive_call_probeset == 'yes' and filter_status == 'no': ###only perform function when initially running
try:
constitutive_original[ensembl_gene_id].append(probeset_id)
except KeyError:
constitutive_original[ensembl_gene_id] = [probeset_id]
if array_type != 'RNASeq':
try:
gene_transcript_cluster_db2[ensembl_gene_id].append(transcript_cluster_id)
except KeyError:
gene_transcript_cluster_db2[ensembl_gene_id] = [transcript_cluster_id]
###If no constitutive probesets for a gene as a result of additional filtering (removing all probesets associated with a splice event), add these back
original_probesets_add = 0;
genes_being_analyzed = {}
for gene in constitutive_gene: genes_being_analyzed[gene] = []
for gene in constitutive_original:
if gene not in constitutive_gene:
genes_being_analyzed[gene] = [gene]
constitutive_gene[gene] = []
original_probesets_add += 1
gene_transcript_cluster_db[gene] = gene_transcript_cluster_db2[gene]
for probeset in constitutive_original[gene]: constitutive_probeset_db[probeset] = gene
#if array_type == 'junction' or array_type == 'RNASeq':
### Added the below in 1.16!!!
### If no constitutive probesets for a gene assigned, assign all gene probesets
for probeset in exon_db:
gene = exon_db[probeset].GeneID()
proceed = 'no'
exonid = exon_db[probeset].ExonID()
### Rather than add all probesets, still filter based on whether the probeset is in an annotated exon
if 'E' in exonid and 'I' not in exonid and '_' not in exonid: proceed = 'yes'
if proceed == 'yes':
if gene not in constitutive_gene:
constitutive_probeset_db[probeset] = gene
genes_being_analyzed[gene] = [gene]
### DO NOT ADD TO constitutive_gene SINCE WE WANT ALL mRNA ALIGNING EXONS/JUNCTIONS TO BE ADDED!!!!
#constitutive_gene[gene]=[]
gene_transcript_cluster_db = eliminate_redundant_dict_values(gene_transcript_cluster_db)
#if affygene == 'ENSMUSG00000023089': print [abs(fold_change_log)],[log_fold_cutoff];kill
if array_type == 'RNASeq':
import RNASeq
try:
last_exon_region_db = RNASeq.importExonAnnotations(species, 'distal-exon', '')
except Exception:
null = []
constitutive_original = [];
constitutive_gene = []
#clearObjectsFromMemory(exon_db); constitutive_probeset_db=[];genes_being_analyzed=[] ### used to evaluate how much memory objects are taking up
#print 'remove_intronic_junctions:',remove_intronic_junctions
#print constitutive_gene['ENSMUSG00000031170'];kill ### Determine if avg_ss_for_all is working
if original_arraytype == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print len(exon_db), id_name, 'stored as instances of SplicingAnnotationData in memory'
#print len(constitutive_probeset_db),'array IDs stored as constitutive'
#print probesets_included_by_new_evidence, 'array IDs were re-annotated as NOT constitutive based on mRNA evidence'
if array_type != 'AltMouse': print original_probesets_add, 'genes not viewed as constitutive as a result of filtering', id_name, 'based on splicing evidence, added back'
end_time = time.time();
time_diff = int(end_time - begin_time)
#print filename,"import finished in %d seconds" % time_diff
if filter_status == 'yes':
return new_exon_db
else:
summary_data_db['gene_assayed'] = len(genes_being_analyzed)
try:
exportDenominatorGenes(genes_being_analyzed)
except Exception:
null = []
return constitutive_probeset_db, exon_db, genes_being_analyzed
def exportDenominatorGenes(genes_being_analyzed):
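### Writes the GO-Elite denominator file (Ensembl gene IDs with system code 'En'), used as the background gene set for downstream over-representation analysis.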
goelite_output = root_dir + 'GO-Elite/denominator/AS.denominator.txt'
goelite_data = export.ExportFile(goelite_output)
systemcode = 'En'
goelite_data.write("GeneID\tSystemCode\n")
for gene in genes_being_analyzed:
if array_type == 'AltMouse':
try:
gene = annotate_db[gene].ExternalGeneID()
except KeyError:
null = []
goelite_data.write(gene + '\t' + systemcode + '\n')
try:
goelite_data.close()
except Exception:
null = []
def performExpressionAnalysis(filename, constitutive_probeset_db, exon_db, annotate_db, dataset_name):
#if analysis_method == 'splicing-index': returnLargeGlobalVars();kill ### used to ensure all large global vars from the reciprocal junction analysis have been cleared from memory
#returnLargeGlobalVars()
"""import list of expression values for arrayids and calculates statistics"""
global fold_dbase;
global original_conditions;
global normalization_method
stats_dbase = {};
fold_dbase = {};
ex_db = {};
si_db = [];
bad_row_import = {};
count = 0
global array_group_name_db;
array_group_name_db = {}
global array_group_db;
array_group_db = {};
global array_raw_group_values;
array_raw_group_values = {};
global original_array_names;
original_array_names = []
global max_replicates;
global equal_replicates;
global array_group_list
array_index_list = [] ###Use this list for permutation analysis
fn = filepath(filename);
line_num = 1
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line);
t = string.split(data, '\t');
probeset = t[0]
if t[0] == '#':
null = [] ### Don't import line
elif line_num == 1:
line_num += 1 #makes this value null for the next loop of actual array data
###Below occurs if the data is raw as opposed to precomputed
if ':' in t[1]:
array_group_list = [];
x = 0 ###gives us an original index value for each entry in the group
for entry in t[1:]:
original_array_names.append(entry)
aa = string.split(entry, ':')
try:
array_group, array_name = aa
except Exception:
array_name = string.join(aa[1:], ':'); array_group = aa[0]
try:
array_group_db[array_group].append(x)
array_group_name_db[array_group].append(array_name)
except KeyError:
array_group_db[array_group] = [x]
array_group_name_db[array_group] = [array_name]
### below only occurs with a new group addition
array_group_list.append(
array_group) #use this to generate comparisons in the below linked function
x += 1
else:
#try: print data_type
#except Exception,exception:
#print exception
#print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "' + filename + '" is not properly formatted.\n Review formatting requirements if this file was created by another application.\n'
print_out += "\nFirst line\n" + line
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
#if probeset in exon_db:
#if exon_db[probeset].GeneID() == 'ENSG00000139970':
###Use the index values from above to assign each expression value to a new database
temp_group_array = {}
line_num += 1
for group in array_group_db:
if count == 0: array_index_list.append(array_group_db[group])
for array_index in array_group_db[group]:
try:
exp_val = float(t[array_index + 1])
except Exception:
if 'Gene_ID' not in line: bad_row_import[probeset] = line; exp_val = 1
###appended is the numerical expression value for each array in the group (temporary array)
try:
temp_group_array[group].append(exp_val) #add 1 since probeset is the first column
except KeyError:
temp_group_array[group] = [exp_val]
if count == 0: array_index_list.sort(); count = 1
####store the group database within the probeset database entry
try:
null = exon_db[
probeset] ###To conserve memory, don't store any probesets not used for downstream analyses (e.g. not linked to mRNAs)
#if 'ENSG00000139970' in probeset:
#print [max_exp]
#print t[1:];kill
#max_exp = max(map(float, t[1:]))
#if len(array_raw_group_values)>10000: break
#if max_exp>math.log(70,2):
array_raw_group_values[probeset] = temp_group_array
except KeyError:
#print probeset
pass
print len(array_raw_group_values), 'sequence identifiers imported out of', line_num - 1
if len(bad_row_import) > 0:
print len(bad_row_import), "Rows with an unexplained import error processed and deleted."
print "Example row:";
x = 0
for i in bad_row_import:
if x == 0: print bad_row_import[i]
try:
del array_raw_group_values[i]
except Exception:
null = []
x += 1
### If no gene expression reporting probesets were imported, update constitutive_probeset_db to include all mRNA aligning probesets
cs_genedb = {};
missing_genedb = {};
addback_genedb = {};
rnaseq_cs_gene_db = {}
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [probeset]
try:
null = array_raw_group_values[probeset];
cs_genedb[gene] = []
if gene == probeset: rnaseq_cs_gene_db[
gene] = [] ### If RPKM normalization used, use the gene expression values already calculated
except Exception:
missing_genedb[gene] = [] ### Collect genes possibly missing from the constitutive database (verify next)
for gene in missing_genedb:
try:
null = cs_genedb[gene]
except Exception:
addback_genedb[gene] = []
for probeset in array_raw_group_values:
try:
gene = exon_db[probeset].GeneID()
try:
null = addback_genedb[gene]
if 'I' not in probeset and 'U' not in probeset: ### No intron- or UTR-containing IDs should be used for constitutive expression
null = string.split(probeset, ':')
if len(null) < 3: ### No trans-gene junctions should be used for constitutive expression
constitutive_probeset_db[probeset] = gene
except Exception:
null = []
except Exception:
null = []
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
#if gene == 'ENSG00000185008': print [[probeset]]
### Only examine values for associated exons when determining RNASeq constitutive expression (when exon data is present)
normalization_method = 'raw'
if array_type == 'RNASeq':
junction_count = 0;
constitutive_probeset_db2 = {}
for uid in constitutive_probeset_db:
if '-' in uid: junction_count += 1
if len(
rnaseq_cs_gene_db) > 0: ### If filtered RPKM gene-level expression data present, use this instead (and only this)
normalization_method = 'RPKM'
constitutive_probeset_db = {} ### Re-set this database
for gene in rnaseq_cs_gene_db:
constitutive_probeset_db[gene] = gene
elif junction_count != 0 and len(constitutive_probeset_db) != junction_count:
### occurs when there is a mix of junction and exon IDs
for uid in constitutive_probeset_db:
if '-' not in uid: constitutive_probeset_db2[uid] = constitutive_probeset_db[uid]
constitutive_probeset_db = constitutive_probeset_db2;
constitutive_probeset_db2 = []
"""
for probeset in constitutive_probeset_db:
gene = constitutive_probeset_db[probeset]
if gene == 'ENSG00000185008': print [probeset]
"""
###Build all putative splicing events
global alt_junction_db;
global exon_dbase;
global critical_exon_db;
critical_exon_db = {}
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### Applies to reciprocal junction analyses only
if array_type == 'AltMouse':
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = ExonAnnotate_module.identifyPutativeSpliceEvents(
exon_db, constitutive_probeset_db, array_raw_group_values, agglomerate_inclusion_probesets,
onlyAnalyzeJunctions)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
elif (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
import JunctionArray
alt_junction_db, critical_exon_db, exon_dbase, exon_inclusion_db, exon_db = JunctionArray.getPutativeSpliceEvents(
species, array_type, exon_db, agglomerate_inclusion_probesets, root_dir)
print 'Number of Genes with Examined Splice Events:', len(alt_junction_db)
#alt_junction_db=[]; critical_exon_db=[]; exon_dbase=[]; exon_inclusion_db=[]
if agglomerate_inclusion_probesets == 'yes':
array_raw_group_values = agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db)
exon_inclusion_db = []
### For datasets with high memory requirements (RNASeq), filter the current and new databases
### Begin this function after agglomeration to ensure agglomerated probesets are considered
reciprocal_probesets = {}
if array_type == 'junction' or array_type == 'RNASeq':
for affygene in alt_junction_db:
for event in alt_junction_db[affygene]:
reciprocal_probesets[event.InclusionProbeset()] = []
reciprocal_probesets[event.ExclusionProbeset()] = []
not_evalutated = {}
for probeset in array_raw_group_values:
try:
null = reciprocal_probesets[probeset]
except Exception:
### Don't remove constitutive probesets
try:
null = constitutive_probeset_db[probeset]
except Exception:
not_evalutated[probeset] = []
#print 'Removing',len(not_evalutated),'exon/junction IDs not evaluated for splicing'
for probeset in not_evalutated:
del array_raw_group_values[probeset]
###Check to see if we have precomputed expression data or raw to be analyzed
x = 0;
y = 0;
array_raw_group_values2 = {};
probesets_to_delete = [] ### Record deleted probesets
if len(array_raw_group_values) == 0:
print_out = "No genes were considered 'Expressed' based on your input options. Check to make sure that the right species database is indicated and that the right data format has been selected (e.g., non-log versus log expression)."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out; print "Exiting program"
badExit()
elif len(array_raw_group_values) > 0:
###array_group_list should already be unique and correctly sorted (see above)
for probeset in array_raw_group_values:
data_lists = []
for group_name in array_group_list:
data_list = array_raw_group_values[probeset][
group_name] ###nested database entry access - baseline expression
if global_addition_factor > 0: data_list = addGlobalFudgeFactor(data_list, 'log')
data_lists.append(data_list)
if len(array_group_list) == 2:
data_list1 = data_lists[0];
data_list2 = data_lists[-1];
avg1 = statistics.avg(data_list1);
avg2 = statistics.avg(data_list2)
log_fold = avg2 - avg1
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df) #Excel doesn't recognize fractions in a DF
#p = statistics.t_probability(t,df)
p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
if p == -1:
if len(data_list1) > 1 and len(data_list2) > 1:
print_out = "The probability statistic selected (" + probability_statistic + ") is not compatible with the\nexperimental design. Please consider an alternative statistic or correct the problem.\nExiting AltAnalyze."
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out; print "Exiting program"
badExit()
else:
p = 1
except Exception:
p = 1
fold_dbase[probeset] = [0];
fold_dbase[probeset].append(log_fold)
stats_dbase[probeset] = [avg1];
stats_dbase[probeset].append(p)
###replace entries with the two lists for later permutation analysis
if p == -1: ### should be p == 1: Not sure why this filter was here, but it mistakenly removes probesets where there is just one array for each group
del fold_dbase[probeset];
del stats_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
if x == 1: print 'Bad data detected...', data_list1, data_list2
elif (
avg1 < expression_threshold and avg2 < expression_threshold and p > p_threshold) and array_type != 'RNASeq': ### Inserted a filtering option to exclude small variance, low expression probesets
del fold_dbase[probeset];
del stats_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
else:
array_raw_group_values2[probeset] = [data_list1, data_list2]
else: ###Non-junction analysis can handle more than 2 groups
index = 0
for data_list in data_lists:
try:
array_raw_group_values2[probeset].append(data_list)
except KeyError:
array_raw_group_values2[probeset] = [data_list]
if len(array_group_list) > 2: ### Thus, there is some variance for this probeset
### Create a complete stats_dbase containing all fold changes
if index == 0:
avg_baseline = statistics.avg(data_list);
stats_dbase[probeset] = [avg_baseline]
else:
avg_exp = statistics.avg(data_list)
log_fold = avg_exp - avg_baseline
try:
fold_dbase[probeset].append(log_fold)
except KeyError:
fold_dbase[probeset] = [0, log_fold]
index += 1
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
array_raw_group_values = array_raw_group_values2;
array_raw_group_values2 = []
print x, id_name, "excluded prior to analysis... predicted not detected"
global original_avg_const_exp_db;
global original_fold_dbase
global avg_const_exp_db;
global permute_lists;
global midas_db
if len(array_raw_group_values) > 0:
adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, original_avg_const_exp_db = constitutive_exp_normalization(
fold_dbase, stats_dbase, exon_db, constitutive_probeset_db)
stats_dbase = [] ### No longer needed after this point
original_fold_dbase = fold_dbase;
avg_const_exp_db = {};
permute_lists = [];
y = 0;
original_conditions = conditions;
max_replicates, equal_replicates = maxReplicates()
gene_expression_diff_db = constitutive_expression_changes(constitutive_fold_change,
annotate_db) ###Add in constitutive fold change filter to assess gene expression for ASPIRE
while conditions > y:
avg_const_exp_db = constitutive_exp_normalization_raw(gene_db, constitutive_gene_db, array_raw_group_values,
exon_db, y, avg_const_exp_db);
y += 1
#print len(avg_const_exp_db),constitutive_gene_db['ENSMUSG00000054850']
###Export Analysis Results for external splicing analysis (e.g. MiDAS format)
if run_MiDAS == 'yes' and normalization_method != 'RPKM': ### RPKM has negative values which will crash MiDAS
status = ResultsExport_module.exportTransitResults(array_group_list, array_raw_group_values,
array_group_name_db, avg_const_exp_db, adj_fold_dbase,
exon_db, dataset_name, apt_location)
print "Finished exporting input data for MiDAS analysis"
try:
midas_db = ResultsExport_module.importMidasOutput(dataset_name)
except Exception:
midas_db = {} ### Occurs if there are not enough samples to calculate a MiDAS p-value
else:
midas_db = {}
###Provides all pairwise permuted group comparisons
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
permute_lists = statistics.permute_arrays(array_index_list)
### Now remove probesets from the analysis that were used to evaluate gene expression
for probeset in constitutive_probeset_db:
try:
null = reciprocal_probesets[probeset]
except Exception:
try:
del array_raw_group_values[probeset]
except Exception:
null = []
not_evalutated = [];
reciprocal_probesets = []
constitutive_probeset_db = []
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
if len(array_group_list) > 2 and analysis_method == 'splicing-index' and (
array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null'): ### USED FOR MULTIPLE COMPARISONS
print 'Calculating splicing-index values for multiple group comparisons (please be patient)...',
"""
if len(midas_db)==0:
print_out = 'Warning!!! MiDAS failed to run for multiple groups. Please make\nsure there are biological replicates present for your groups.\nAltAnalyze requires replicates for multi-group (more than two) analyses.'
try: UI.WarningWindow(print_out,'Exit')
except Exception: print print_out; print "Exiting program"
badExit()"""
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del nonlog_NI_db[probeset]
except KeyError:
null = []
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['Gene\tExonID\tprobesetID'] + original_array_names, '\t') + '\n';
adjoutput.write(title)
### Pick which data lists have the most extreme values using the NI_dbase (adjusted folds for each condition)
original_increment = int(len(nonlog_NI_db) / 20);
increment = original_increment;
interaction = 0
for probeset in nonlog_NI_db:
if interaction == increment: increment += original_increment; print '*',
interaction += 1
geneid = exon_db[probeset].GeneID();
ed = exon_db[probeset]
index = 0;
NI_list = [] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset]:
NI_list.append((NI, index));
index += 1 ### setup to sort for the extreme adj folds and get associated group_name using the index
raw_exp_vals = array_raw_group_values[probeset]
adj_exp_lists = {} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
k = 0;
gi = 0;
adj_exp_vals = []
for exp_list in raw_exp_vals:
for exp in exp_list:
adj_exp_val = exp - avg_const_exp_db[geneid][k]
try:
adj_exp_lists[gi].append(adj_exp_val)
except Exception:
adj_exp_lists[gi] = [adj_exp_val]
if export_NI_values == 'yes': adj_exp_vals.append(str(adj_exp_val))
k += 1
gi += 1
if export_NI_values == 'yes':
#print geneid+'-'+probeset, adj_exp_val, [ed.ExonID()];kill
if export_exon_regions == 'yes':
try: ### This will only work if ExonRegionID is stored in the abbreviated AffyExonSTData object - useful in comparing results between arrays (exon-region centric)
if (
array_type == 'exon' or array_type == 'gene') or '-' not in ed.ExonID(): ### only include exon entries not junctions
exon_regions = string.split(ed.ExonRegionID(), '|')
for er in exon_regions:
if len(er) > 0:
er = er
else:
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join([geneid + '\t' + er + '\t' + probeset] + adj_exp_vals,
'\t') + '\n'
if len(filtered_probeset_db) > 0:
if probeset in filtered_probeset_db: adjoutput.write(
ev) ### This is used when we want to restrict to only probesets known to already be changed
else:
adjoutput.write(ev)
except Exception:
ev = string.join([geneid + '\t' + 'NA' + '\t' + probeset] + adj_exp_vals, '\t') + '\n';
adjoutput.write(ev)
NI_list.sort()
examine_pairwise_comparisons = 'yes'
if examine_pairwise_comparisons == 'yes':
k1 = 0;
k2 = 0;
filtered_NI_comps = []
NI_list_rev = list(NI_list);
NI_list_rev.reverse()
NI1, index1 = NI_list[k1];
NI2, index2 = NI_list_rev[k2];
abs_SI = abs(math.log(NI1 / NI2, 2))
if abs_SI < alt_exon_logfold_cutoff:
### Indicates that no valid matches were identified - hence, exit loop and return an NI_list with no variance
NI_list = [NI_list[0], NI_list[0]]
else:
### The most extreme pair passes the splicing-index cutoff - keep it if the gene-level fold change is small enough, otherwise evaluate all pairs below
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
#print 'original',abs_SI,k1,k2, ge_fold, constit_exp1, constit_exp2
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI, k1, k2])
else:
for i1 in NI_list:
k2 = 0
for i2 in NI_list_rev:
NI1, index1 = i1;
NI2, index2 = i2;
abs_SI = abs(math.log(NI1 / NI2, 2))
#constit_exp1 = original_avg_const_exp_db[geneid][index1]
#constit_exp2 = original_avg_const_exp_db[geneid][index2]
#ge_fold = constit_exp2-constit_exp1
#if abs(ge_fold) < log_fold_cutoff: filtered_NI_comps.append([abs_SI,k1,k2])
#print k1,k2, i1, i2, abs_SI, abs(ge_fold), log_fold_cutoff, alt_exon_logfold_cutoff
if abs_SI < alt_exon_logfold_cutoff:
break
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
if abs(ge_fold) < log_fold_cutoff:
filtered_NI_comps.append([abs_SI, k1, k2])
#if k1 == 49 or k1 == 50 or k1 == 51: print probeset, abs_SI, k1, k2, abs(ge_fold),log_fold_cutoff, index1, index2, NI1, NI2, constit_exp1,constit_exp2
k2 += 1
k1 += 1
if len(filtered_NI_comps) > 0:
#print filtered_NI_comps
#print NI_list_rev
#print probeset,geneid
#print len(filtered_NI_comps)
#print original_avg_const_exp_db[geneid]
filtered_NI_comps.sort()
si, k1, k2 = filtered_NI_comps[-1]
NI_list = [NI_list[k1], NI_list_rev[k2]]
"""
NI1,index1 = NI_list[0]; NI2,index2 = NI_list[-1]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2-constit_exp1
print probeset, si, ge_fold, NI_list"""
#print k1,k2;sys.exit()
index1 = NI_list[0][1];
index2 = NI_list[-1][1]
nonlog_NI_db[probeset] = [NI_list[0][0], NI_list[-1][0]] ### Update the values of this dictionary
data_list1 = array_raw_group_values[probeset][index1];
data_list2 = array_raw_group_values[probeset][index2]
avg1 = statistics.avg(data_list1);
avg2 = statistics.avg(data_list2);
log_fold = avg2 - avg1
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
#t,df,tails = statistics.ttest(data_list1,data_list2,2,3) #unpaired student ttest, calls p_value function
#t = abs(t); df = round(df); ttest_exp_p = statistics.t_probability(t,df)
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
fold_dbase[probeset] = [0];
fold_dbase[probeset].append(log_fold)
if ttest_exp_p == -1:
del fold_dbase[probeset]; probesets_to_delete.append(probeset); x += 1
elif avg1 < expression_threshold and avg2 < expression_threshold and (
ttest_exp_p > p_threshold and ttest_exp_p != 1): ### Inserted a filtering option to exclude small variance, low expression probesets
del fold_dbase[probeset];
probesets_to_delete.append(probeset);
x += 1
else:
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
normInt1 = (avg1 - constit_exp1);
normInt2 = (avg2 - constit_exp2)
adj_fold = normInt2 - normInt1
splicing_index = -1 * adj_fold;
abs_splicing_index = abs(splicing_index)
#print probeset, splicing_index, ge_fold, index1, index2
#normIntList1 = adj_exp_lists[index1]; normIntList2 = adj_exp_lists[index2]
all_nI = []
for g_index in adj_exp_lists: all_nI.append(adj_exp_lists[g_index])
try:
normIntensityP = statistics.OneWayANOVA(
all_nI) #[normIntList1,normIntList2] ### This stays an ANOVA independent of the algorithm chosen since the group number is > 2
except Exception:
normIntensityP = 'NA'
if (normInt1 * normInt2) < 0:
opposite_SI_log_mean = 'yes'
else:
opposite_SI_log_mean = 'no'
abs_log_ratio = abs(ge_fold)
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
#if 'ENSG00000059588' in geneid: print probeset, splicing_index, constit_exp1, constit_exp2, ge_fold,group_name2+'_vs_'+group_name1, index1, index2
if abs_splicing_index > alt_exon_logfold_cutoff and (
midas_p < p_threshold or midas_p == 'NA'): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
ped = ProbesetExpressionData(avg1, avg2, log_fold, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, normInt1, normInt2,
normIntensityP, opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1);
sid.setConstitutiveFold(ge_fold);
sid.setProbesetExpressionData(ped)
si_db.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid, normIntensityP)
ex_db[probeset] = eed
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print len(si_db), id_name, "with evidence of Alternative expression"
original_fold_dbase = fold_dbase;
si_db.sort()
summary_data_db['denominator_exp_events'] = len(nonlog_NI_db)
del avg_const_exp_db;
del gene_db;
del constitutive_gene_db;
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
### Above, all conditions were examined when more than 2 are present... change this so that only the most extreme are analyzed further
elif len(array_group_list) > 2 and (
array_type == 'junction' or array_type == 'RNASeq' or array_type == 'AltMouse'): ### USED FOR MULTIPLE COMPARISONS
excluded_probeset_db = {}
group_sizes = [];
original_array_indices = permute_lists[
0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
if analysis_method == 'linearregres': ### For linear regression, these scores are non-log
original_array_raw_group_values = copy.deepcopy(array_raw_group_values)
for probeset in array_raw_group_values:
ls_concatenated = []
for group in array_raw_group_values[probeset]: ls_concatenated += group
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
pos1 = 0;
pos2 = 0;
positions = []
for group in group_sizes:
if pos1 == 0:
pos2 = group; positions.append((pos1, pos2))
else:
pos2 = pos1 + group; positions.append((pos1, pos2))
pos1 = pos2
if export_NI_values == 'yes':
export_exon_regions = 'yes'
### Currently, we don't deal with raw adjusted expression values, just group, so just export the values for each group
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
print "Exporting all normalized intensities to:\n" + summary_output
adjoutput = export.ExportFile(summary_output)
title = string.join(['gene\tprobesets\tExonRegion'] + original_array_names, '\t') + '\n';
adjoutput.write(title)
events_examined = 0;
denominator_events = 0;
fold_dbase = [];
adj_fold_dbase = [];
scores_examined = 0
splice_event_list = [];
splice_event_list_mx = [];
splice_event_list_non_mx = [];
event_mx_temp = [];
permute_p_values = {};
probeset_comp_db = {}#use this to exclude duplicate mx events
for geneid in alt_junction_db:
affygene = geneid
for event in alt_junction_db[geneid]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1];
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset();
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction();
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData([probeset1, probeset2], geneid, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[geneid].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
dI_scores = []
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db and probeset1 in array_raw_group_values and probeset2 in array_raw_group_values:
events_examined += 1
if analysis_method == 'ASPIRE':
index1 = 0;
NI_list1 = [];
NI_list2 = [] ### Add the group_name to each adj fold value
for NI in nonlog_NI_db[probeset1]: NI_list1.append(NI)
for NI in nonlog_NI_db[probeset2]: NI_list2.append(NI)
for NI1_g1 in NI_list1:
NI2_g1 = NI_list2[index1];
index2 = 0
for NI1_g2 in NI_list1:
try:
NI2_g2 = NI_list2[index2]
except Exception:
print index1, index2, NI_list1, NI_list2;kill
if index1 != index2:
b1 = NI1_g1;
e1 = NI1_g2
b2 = NI2_g1;
e2 = NI2_g2
try:
dI = statistics.aspire_stringent(b1, e1, b2, e2);
Rin = b1 / e1;
Rex = b2 / e2
if (Rin > 1 and Rex < 1) or (Rin < 1 and Rex > 1):
if dI < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
dI_scores.append((abs(dI), i1, i2))
except Exception:
#if array_type != 'RNASeq': ### RNASeq has counts of zero and one that can cause the same result between groups and probesets
#print probeset1, probeset2, b1, e1, b2, e2, index1, index2, events_examined;kill
### Exception - Occurs for RNA-Seq but can occur for array data under extremely rare circumstances (Rex=Rin even with different b1,e1 and b2,e2 values)
null = []
index2 += 1
index1 += 1
dI_scores.sort()
if analysis_method == 'linearregres':
log_fold, i1, i2 = getAllPossibleLinearRegressionScores(probeset1, probeset2, positions,
group_sizes)
dI_scores.append((log_fold, i1, i2))
raw_exp_vals1 = original_array_raw_group_values[probeset1];
raw_exp_vals2 = original_array_raw_group_values[probeset2]
else:
raw_exp_vals1 = array_raw_group_values[probeset1]; raw_exp_vals2 = array_raw_group_values[
probeset2]
adj_exp_lists1 = {};
adj_exp_lists2 = {} ### Store the adjusted expression values for each group
if geneid in avg_const_exp_db:
gi = 0;
l = 0;
adj_exp_vals = [];
anova_test = []
for exp_list in raw_exp_vals1:
k = 0;
anova_group = []
for exp in exp_list:
adj_exp_val1 = exp - avg_const_exp_db[geneid][l]
try:
adj_exp_lists1[gi].append(adj_exp_val1)
except Exception:
adj_exp_lists1[gi] = [adj_exp_val1]
adj_exp_val2 = raw_exp_vals2[gi][k] - avg_const_exp_db[geneid][l]
try:
adj_exp_lists2[gi].append(adj_exp_val2)
except Exception:
adj_exp_lists2[gi] = [adj_exp_val2]
anova_group.append(adj_exp_val2 - adj_exp_val1)
if export_NI_values == 'yes':
#if analysis_method == 'ASPIRE':
adj_exp_vals.append(str(adj_exp_val2 - adj_exp_val1))
### BELOW CODE PRODUCES THE SAME RESULT!!!!
"""folds1 = statistics.log_fold_conversion_fraction([exp])
folds2 = statistics.log_fold_conversion_fraction([raw_exp_vals2[gi][k]])
lr_score = statistics.convert_to_log_fold(statistics.simpleLinRegress(folds1,folds2))
adj_exp_vals.append(str(lr_score))"""
k += 1;
l += 1 ### per-sample index into avg_const_exp_db (spans all groups)
gi += 1;
anova_test.append(anova_group)
if export_NI_values == 'yes':
if export_exon_regions == 'yes':
exon_regions = string.join(critical_exon_list[1], '|')
exon_regions = string.split(exon_regions, '|')
for er in exon_regions:
ev = string.join(
[geneid + '\t' + probeset1 + '-' + probeset2 + '\t' + er] + adj_exp_vals,
'\t') + '\n'
if len(filtered_probeset_db) > 0:
if probeset1 in filtered_probeset_db and probeset2 in filtered_probeset_db:
adjoutput.write(
ev) ### This is used when we want to restrict to only probesets known to already by changed
else:
adjoutput.write(ev)
try:
anovaNIp = statistics.OneWayANOVA(
anova_test) ### This stays an ANOVA independent of the algorithm choosen since groups number > 2
except Exception:
anovaNIp = 'NA'
if len(dI_scores) > 0 and geneid in avg_const_exp_db:
dI, index1, index2 = dI_scores[-1];
count = 0
probesets = [probeset1, probeset2];
index = 0
key, jd = formatJunctionData([probeset1, probeset2], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:
null = []
probeset_comp_db[
key] = jd ### This is used for the permutation analysis and domain/mirBS import
if max_replicates > 2 or equal_replicates == 2: permute_p_values[(probeset1, probeset2)] = [
anovaNIp, 'NA', 'NA', 'NA']
index = 0
for probeset in probesets:
if analysis_method == 'linearregres':
data_list1 = original_array_raw_group_values[probeset][index1];
data_list2 = original_array_raw_group_values[probeset][index2]
else:
data_list1 = array_raw_group_values[probeset][index1]; data_list2 = \
array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2,
probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
if index == 0:
try:
adj_fold = statistics.avg(adj_exp_lists1[index2]) - statistics.avg(
adj_exp_lists1[index1])
except Exception:
print raw_exp_vals1, raw_exp_vals2, avg_const_exp_db[geneid]
print probeset, probesets, adj_exp_lists1, adj_exp_lists2, index1, index2;
kill
ped1 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_exp_p, group_name2 + '_vs_' + group_name1)
else:
adj_fold = statistics.avg(adj_exp_lists2[index2]) - statistics.avg(
adj_exp_lists2[index1])
ped2 = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold,
ttest_exp_p, group_name2 + '_vs_' + group_name1)
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
index += 1
try:
pp1 = statistics.runComparisonStatistic(adj_exp_lists1[index1], adj_exp_lists1[index2],
probability_statistic)
pp2 = statistics.runComparisonStatistic(adj_exp_lists2[index1], adj_exp_lists2[index2],
probability_statistic)
except Exception:
pp1 = 'NA'; pp2 = 'NA'
if analysis_method == 'ASPIRE' and len(dI_scores) > 0:
p1 = JunctionExpressionData(adj_exp_lists1[index1], adj_exp_lists1[index2], pp1, ped1)
p2 = JunctionExpressionData(adj_exp_lists2[index1], adj_exp_lists2[index2], pp2, ped2)
### ANOVA p-replaces the below p-value
"""try: baseline_scores, exp_scores, pairwiseNIp = calculateAllASPIREScores(p1,p2)
except Exception: baseline_scores = [0]; exp_scores=[dI]; pairwiseNIp = 0 """
#if pairwiseNIp == 'NA': pairwiseNIp = 0 ### probably comment out
if len(dI_scores) > 0:
scores_examined += 1
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 'NA'
else:
midas_p = 'NA'
if dI > alt_exon_logfold_cutoff and (
anovaNIp < p_threshold or perform_permutation_analysis == 'yes' or anovaNIp == 'NA' or anovaNIp == 1): #and abs_log_ratio>1 and ttest_exp_p<0.05: ###and ge_threshold_count==2
#print [dI, probeset1,probeset2, anovaNIp, alt_exon_logfold_cutoff];kill
ejd = ExonJunctionData(dI, probeset1, probeset2, pp1, pp2, 'upregulated', event_call,
critical_exon_list, affygene, ped1, ped2)
ejd.setConstitutiveFold(ge_fold);
ejd.setConstitutiveExpression(constit_exp1)
if array_type == 'RNASeq':
ejd.setNovelEvent(novel_event)
splice_event_list.append((dI, ejd))
else:
excluded_probeset_db[
affygene + ':' + critical_exon_list[1][0]] = probeset1, affygene, dI, 'NA', anovaNIp
statistics.adjustPermuteStats(permute_p_values)
ex_db = splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
original_fold_dbase = fold_dbase;
original_avg_const_exp_db = [];
nonlog_NI_db = [];
fold_dbase = []
summary_data_db['denominator_exp_events'] = events_examined
del avg_const_exp_db;
del gene_db;
del constitutive_gene_db;
gene_expression_diff_db = {}
if export_NI_values == 'yes': adjoutput.close()
print len(splice_event_list), 'alternative exons out of %s exon events examined' % events_examined
fold_dbase = [];
original_fold_dbase = [];
exon_db = [];
constitutive_gene_db = [];
addback_genedb = []
gene_db = [];
missing_genedb = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db
class ProbesetExpressionData:
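### Simple container for per-comparison probeset expression statistics: baseline/experimental means, raw and gene-normalized folds, the expression p-value and the group-comparison label.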
def __init__(self, baseline_exp, experimental_exp, fold_change, adj_fold, ttest_raw_exp, annotation):
self.baseline_exp = baseline_exp;
self.experimental_exp = experimental_exp
self.fold_change = fold_change;
self.adj_fold = adj_fold
self.ttest_raw_exp = ttest_raw_exp;
self.annotation = annotation
def BaselineExp(self): return str(self.baseline_exp)
def ExperimentalExp(self): return str(self.experimental_exp)
def FoldChange(self): return str(self.fold_change)
def AdjFold(self): return str(self.adj_fold)
def ExpPval(self): return str(self.ttest_raw_exp)
def Annotation(self): return self.annotation
def __repr__(self): return self.BaselineExp() + '|' + self.FoldChange()
def agglomerateInclusionProbesets(array_raw_group_values, exon_inclusion_db):
###Combine expression profiles for inclusion probesets that correspond to the same splice event
for excl_probeset in exon_inclusion_db:
inclusion_event_profiles = []
if len(exon_inclusion_db[excl_probeset]) > 1:
for incl_probeset in exon_inclusion_db[excl_probeset]:
if incl_probeset in array_raw_group_values and excl_probeset in array_raw_group_values:
array_group_values = array_raw_group_values[incl_probeset]
inclusion_event_profiles.append(array_group_values)
#del array_raw_group_values[incl_probeset] ###Remove un-agglomerated original entry
if len(inclusion_event_profiles) > 0: ###Thus, some probesets for this splice event in input file
combined_event_profile = combine_profiles(inclusion_event_profiles)
###Combine inclusion probesets into a single ID (identical manner to that in ExonAnnotate_module.identifyPutativeSpliceEvents
incl_probesets = exon_inclusion_db[excl_probeset]
incl_probesets_str = string.join(incl_probesets, '|')
array_raw_group_values[incl_probesets_str] = combined_event_profile
return array_raw_group_values
def combine_profiles(profile_list):
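### Element-wise averages the group expression profiles in profile_list (a list of {group: [values]} dictionaries), returning a single combined {group: [averaged values]} profile.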
profile_group_sizes = {}
for db in profile_list:
for key in db: profile_group_sizes[key] = len(db[key])
break
new_profile_db = {}
for key in profile_group_sizes:
x = profile_group_sizes[key] ###number of elements in list for key
new_val_list = [];
i = 0
while i < x:
temp_val_list = []
for db in profile_list:
if key in db: val = db[key][i]; temp_val_list.append(val)
i += 1;
val_avg = statistics.avg(temp_val_list);
new_val_list.append(val_avg)
new_profile_db[key] = new_val_list
return new_profile_db
def constitutive_exp_normalization(fold_db, stats_dbase, exon_db, constitutive_probeset_db):
"""For every expression value, normalize to the expression of the constitutive gene features for that condition,
then store those ratios (probeset_exp/avg_constitutive_exp) and regenerate expression values relative only to the
baseline avg_constitutive_exp, for all conditions, to normalize out gene expression changes"""
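### In outline: for each gene and condition the average constitutive (gene-level) expression is computed; each probeset's
### non-log expression is divided by that average to give the normalized intensity stored in nonlog_NI_db, and the
### constitutive (gene-level) log2 fold change is subtracted from the probeset fold to give adj_fold_dbase.
### Illustrative sketch (hypothetical log2 values, assuming log_fold_conversion_fraction returns 2**x):
### exp_val = 8.0 and expr_to_subtract = 7.0 give exp_splice_valff = 256.0/128.0 = 2.0.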
#print "\nParameters:"
#print "Factor_out_expression_changes:",factor_out_expression_changes
#print "Only_include_constitutive_containing_genes:",only_include_constitutive_containing_genes
#print "\nAdjusting probeset average intensity values to factor out condition specific expression changes for optimal splicing descrimination"
gene_db = {};
constitutive_gene_db = {}
### organize everything by gene
for probeset in fold_db: conditions = len(fold_db[probeset]); break
remove_diff_exp_genes = remove_transcriptional_regulated_genes
if conditions > 2: remove_diff_exp_genes = 'no'
for probeset in exon_db:
affygene = exon_db[
probeset].GeneID() #exon_db[probeset] = affygene,exons,ensembl,block_exon_ids,block_structure,comparison_info
if probeset in fold_db:
try:
gene_db[affygene].append(probeset)
except KeyError:
gene_db[affygene] = [probeset]
if probeset in constitutive_probeset_db and (
only_include_constitutive_containing_genes == 'yes' or factor_out_expression_changes == 'no'):
#the second conditional is used to exclude constitutive data if we wish to use all probesets for
#background normalization rather than just the designated 'gene' probesets.
if probeset in stats_dbase:
try:
constitutive_gene_db[affygene].append(probeset)
except KeyError:
constitutive_gene_db[affygene] = [probeset]
if len(constitutive_gene_db) > 0:
###This is blank when there are no constitutive probesets and the above condition is implemented
gene_db2 = constitutive_gene_db
else:
gene_db2 = gene_db
avg_const_exp_db = {}
for affygene in gene_db2:
probeset_list = gene_db2[affygene]
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_list.append(exp_val)
avg_const_exp = statistics.avg(exp_list)
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
adj_fold_dbase = {};
nonlog_NI_db = {};
constitutive_fold_change = {}
for affygene in avg_const_exp_db: ###If we only wish to include proper constitutive probes, this will ensure we only examine those genes and probesets that are constitutive
probeset_list = gene_db[affygene]
x = 0
while x < conditions:
exp_list = []
for probeset in probeset_list:
expr_to_subtract = avg_const_exp_db[affygene][x]
baseline_const_exp = avg_const_exp_db[affygene][0]
probe_fold_val = fold_db[probeset][x]
baseline_exp = stats_dbase[probeset][0]
exp_val = probe_fold_val + baseline_exp
exp_val_non_log = statistics.log_fold_conversion_fraction(exp_val)
expr_to_subtract_non_log = statistics.log_fold_conversion_fraction(expr_to_subtract)
baseline_const_exp_non_log = statistics.log_fold_conversion_fraction(baseline_const_exp)
if factor_out_expression_changes == 'yes':
exp_splice_valff = exp_val_non_log / expr_to_subtract_non_log
else: #if no, then we just normalize to the baseline constitutive expression in order to keep gene expression effects (useful if you don't trust constitutive feature expression levels)
exp_splice_valff = exp_val_non_log / baseline_const_exp_non_log
constitutive_fold_diff = expr_to_subtract_non_log / baseline_const_exp_non_log
###To calculate adjusted expression, we need to get the fold change in the constitutive avg (expr_to_subtract/baseline_const_exp) and divide the experimental expression
###By this fold change.
ge_adj_exp_non_log = exp_val_non_log / constitutive_fold_diff #gives a GE adjusted expression
try:
ge_adj_exp = math.log(ge_adj_exp_non_log, 2)
except ValueError:
print probeset, ge_adj_exp_non_log, constitutive_fold_diff, exp_val_non_log, exp_val, baseline_exp, probe_fold_val, dog
adj_probe_fold_val = ge_adj_exp - baseline_exp
### Here we normalize probeset expression to avg-constitutive expression by dividing probe signal by avg const. probe signal (should be < 1)
### referred to as steady-state normalization
if array_type != 'AltMouse' or (probeset not in constitutive_probeset_db):
"""Can't use constitutive gene features since these have no variance for pearson analysis
Python will approximate numbers to a small decimal point range. If the first fold value is
zero, often, zero will be close to but not exactly zero. Correct below """
try:
adj_fold_dbase[probeset].append(adj_probe_fold_val)
except KeyError:
if abs(adj_probe_fold_val - 0) < 0.0000001: #make zero == exactly to zero
adj_probe_fold_val = 0
adj_fold_dbase[probeset] = [adj_probe_fold_val]
try:
nonlog_NI_db[probeset].append(
exp_splice_valff) ###ratio of junction exp relative to gene expression at that time-point
except KeyError:
nonlog_NI_db[probeset] = [exp_splice_valff]
n = 0
#if expr_to_subtract_non_log != baseline_const_exp_non_log: ###otherwise this is the first value in the expression array
if x != 0: ###previous expression can produce errors when multiple group averages have identical values
fold_change = expr_to_subtract_non_log / baseline_const_exp_non_log
fold_change_log = math.log(fold_change, 2)
constitutive_fold_change[affygene] = fold_change_log
### If we want to remove any genes from the analysis with large transcriptional changes
### that may lead to false positive splicing calls (different probeset kinetics)
if remove_diff_exp_genes == 'yes':
if abs(fold_change_log) > log_fold_cutoff:
del constitutive_fold_change[affygene]
try:
del adj_fold_dbase[probeset]
except KeyError:
n = 1
try:
del nonlog_NI_db[probeset]
except KeyError:
n = 1
"""elif expr_to_subtract_non_log == baseline_const_exp_non_log: ###This doesn't make sense, since n can't equal 1 if the conditional is false (check this code again later 11/23/07)
if n == 1:
del adj_fold_dbase[probeset]
del nonlog_NI_db[probeset]"""
x += 1
print "Intensity normalization complete..."
if factor_out_expression_changes == 'no':
adj_fold_dbase = fold_db #don't change expression values
print len(constitutive_fold_change), "genes undergoing analysis for alternative splicing/transcription"
summary_data_db['denominator_exp_genes'] = len(constitutive_fold_change)
"""
mir_gene_count = 0
for gene in constitutive_fold_change:
if gene in gene_microRNA_denom: mir_gene_count+=1
print mir_gene_count, "Genes with predicted microRNA binding sites undergoing analysis for alternative splicing/transcription"
"""
global gene_analyzed;
gene_analyzed = len(constitutive_gene_db)
return adj_fold_dbase, nonlog_NI_db, conditions, gene_db, constitutive_gene_db, constitutive_fold_change, avg_const_exp_db
class TranscriptionData:
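### Container pairing a gene's constitutive (gene-level) fold change with any RNA-processing/binding-factor annotation.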
def __init__(self, constitutive_fold, rna_processing_annotation):
self._constitutive_fold = constitutive_fold;
self._rna_processing_annotation = rna_processing_annotation
def ConstitutiveFold(self): return self._constitutive_fold
def ConstitutiveFoldStr(self): return str(self._constitutive_fold)
def RNAProcessing(self): return self._rna_processing_annotation
def __repr__(self): return self.ConstitutiveFoldStr() + '|' + self.RNAProcessing()
def constitutive_expression_changes(constitutive_fold_change, annotate_db):
###Add in constitutive fold change filter to assess gene expression for ASPIRE
gene_expression_diff_db = {}
for affygene in constitutive_fold_change:
constitutive_fold = constitutive_fold_change[affygene];
rna_processing_annotation = ''
if affygene in annotate_db:
if len(annotate_db[affygene].RNAProcessing()) > 4: rna_processing_annotation = annotate_db[
affygene].RNAProcessing()
###Add in evaluation of RNA-processing/binding factor
td = TranscriptionData(constitutive_fold, rna_processing_annotation)
gene_expression_diff_db[affygene] = td
return gene_expression_diff_db
def constitutive_exp_normalization_raw(gene_db, constitutive_gene_db, array_raw_group_values, exon_db, y,
avg_const_exp_db):
"""normalize expression for raw expression data (only for non-baseline data)"""
#avg_true_const_exp_db[affygene] = [avg_const_exp]
temp_avg_const_exp_db = {}
for probeset in array_raw_group_values:
conditions = len(array_raw_group_values[probeset][y]);
break #number of raw expression values to normalize
for affygene in gene_db:
###This is blank when there are no constitutive probesets or the above condition is implemented
if affygene in constitutive_gene_db:
probeset_list = constitutive_gene_db[affygene]
z = 1
else: ###so we can analyze splicing independent of gene expression even if no 'gene' feature is present
probeset_list = gene_db[affygene]
z = 0
x = 0
while x < conditions:
### average all exp values for constitutive probesets for each condition
exp_list = []
for probeset in probeset_list:
try:
exp_val = array_raw_group_values[probeset][y][
x] ### try statement is used for constitutive probes that were deleted due to filtering in performExpressionAnalysis
except KeyError:
continue
exp_list.append(exp_val)
try:
avg_const_exp = statistics.avg(exp_list)
except Exception:
avg_const_exp = 'null'
if only_include_constitutive_containing_genes == 'yes' and avg_const_exp != 'null':
if z == 1:
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
elif avg_const_exp != 'null': ###***
try:
avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
avg_const_exp_db[affygene] = [avg_const_exp]
try:
temp_avg_const_exp_db[affygene].append(avg_const_exp)
except KeyError:
temp_avg_const_exp_db[affygene] = [avg_const_exp]
x += 1
if analysis_method == 'ANOVA':
global normalized_raw_exp_ratios;
normalized_raw_exp_ratios = {}
for affygene in gene_db:
probeset_list = gene_db[affygene]
for probeset in probeset_list:
while x < group_size:
new_ratios = [] ### Calculate expression ratios relative to constitutive expression
exp_val = array_raw_group_values[probeset][y][x]
const_exp_val = temp_avg_const_exp_db[affygene][x]
###Since the above dictionary is agglomerating all constitutive expression values for permutation,
###we need an unbiased way to grab just those relevant const. exp. vals. (hence the temp dictionary)
#non_log_exp_val = statistics.log_fold_conversion_fraction(exp_val)
#non_log_const_exp_val = statistics.log_fold_conversion_fraction(const_exp_val)
#non_log_exp_ratio = non_log_exp_val/non_log_const_exp_val
log_exp_ratio = exp_val - const_exp_val
try:
normalized_raw_exp_ratios[probeset].append(log_exp_ratio)
except KeyError:
normalized_raw_exp_ratios[probeset] = [log_exp_ratio]
return avg_const_exp_db
######### Z Score Analyses #######
class ZScoreData:
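### Per-element (e.g. protein domain or microRNA) enrichment record: changed vs. measured gene counts, z-score, null z-score, permutation and adjusted p-values, and the associated gene symbols.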
def __init__(self, element, changed, measured, zscore, null_z, gene_symbols):
self._element = element;
self._changed = changed;
self._measured = measured
self._zscore = zscore;
self._null_z = null_z;
self._gene_symbols = gene_symbols
def ElementID(self):
return self._element
def Changed(self):
return str(self._changed)
def Measured(self):
return str(self._measured)
def AssociatedWithElement(self):
return str(self._gene_symbols)
def ZScore(self):
return str(self._zscore)
def SetP(self, p):
self._permute_p = p
def PermuteP(self):
return str(self._permute_p)
def SetAdjP(self, adjp):
self._adj_p = adjp
def AdjP(self):
return str(self._adj_p)
def PercentChanged(self):
try:
pc = float(self.Changed()) / float(self.Measured()) * 100
except Exception:
pc = 0
return str(pc)
def NullZ(self):
return self._null_z
def Report(self):
output = self.ElementID()
return output
def __repr__(self):
return self.Report()
class FDRStats(ZScoreData):
def __init__(self, p): self._permute_p = p
def AdjP(self): return str(self._adj_p)
def countGenesForElement(permute_input_list, probeset_to_gene, probeset_element_db):
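### Returns {element: number of unique genes} for the probesets in permute_input_list, mapping probesets to elements and then collapsing to unique gene counts.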
element_gene_db = {}
for probeset in permute_input_list:
try:
element_list = probeset_element_db[probeset]
gene = probeset_to_gene[probeset]
for element in element_list:
try:
element_gene_db[element].append(gene)
except KeyError:
element_gene_db[element] = [gene]
except KeyError:
null = []
### Count the number of unique genes per element
for element in element_gene_db:
t = {}
for i in element_gene_db[element]: t[i] = []
element_gene_db[element] = len(t)
return element_gene_db
def formatGeneSymbolHits(geneid_list):
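### Converts Ensembl gene IDs to gene symbols where annotations are available (falling back to the ID) and returns them as a comma-separated string.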
symbol_list = []
for geneid in geneid_list:
symbol = ''
if geneid in annotate_db: symbol = annotate_db[geneid].Symbol()
if len(symbol) < 1: symbol = geneid
symbol_list.append(symbol)
symbol_str = string.join(symbol_list, ', ')
return symbol_str
def zscore(r, n, N, R):
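### Over-representation z-score: r hits among the n genes linked to an element, given R regulated genes out of
### N measured. The numerator is the observed minus the expected hit count (n*R/N); the denominator is the
### standard deviation of a hypergeometric draw (sampling without replacement), hence the (1-(n-1)/(N-1)) term.
### e.g. zscore(5.0, 20.0, 1000.0, 100.0) compares 5 observed hits to the 2.0 expected by chance.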
z = (r - n * (R / N)) / math.sqrt(
n * (R / N) * (1 - (R / N)) * (1 - ((n - 1) / (N - 1)))) #z = statistics.zscore(r,n,N,R)
return z
def calculateZScores(hit_count_db, denom_count_db, total_gene_denom_count, total_gene_hit_count, element_type):
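### For each element (domain or microRNA), computes the enrichment z-score of regulated vs. measured genes,
### stores the result as a ZScoreData object and seeds permuted_z_scores with the observed value; when
### permutation analysis is disabled, a Fisher's exact p-value is assigned instead.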
N = float(total_gene_denom_count) ###Genes examined
R = float(total_gene_hit_count) ###AS genes
for element in denom_count_db:
element_denom_gene_count = denom_count_db[element]
n = float(element_denom_gene_count) ###all genes associated with element
if element in hit_count_db:
element_hit_gene_count = len(hit_count_db[element])
gene_symbols = formatGeneSymbolHits(hit_count_db[element])
r = float(element_hit_gene_count) ###regulated genes associated with element
else:
r = 0; gene_symbols = ''
try:
z = zscore(r, n, N, R)
except Exception:
z = 0; #print 'error:',element,r,n,N,R; kill
try:
null_z = zscore(0, n, N, R)
except Exception:
null_z = 0; #print 'error:',element,r,n,N,R; kill
zsd = ZScoreData(element, r, n, z, null_z, gene_symbols)
if element_type == 'domain':
original_domain_z_score_data[element] = zsd
elif element_type == 'microRNA':
original_microRNA_z_score_data[element] = zsd
permuted_z_scores[element] = [z]
if perform_element_permutation_analysis == 'no':
### The below is an alternative to the permute t-statistic that is more efficient
p = FishersExactTest(r, n, R, N)
zsd.SetP(p)
return N, R
######### Begin Permutation Analysis #######
def calculatePermuteZScores(permute_element_inputs, element_denominator_gene_count, N, R):
###Make this code as efficient as possible
for element_input_gene_count in permute_element_inputs:
for element in element_input_gene_count:
r = element_input_gene_count[element]
n = element_denominator_gene_count[element]
try:
z = statistics.zscore(r, n, N, R)
except Exception:
z = 0
permuted_z_scores[element].append(abs(z))
#if element == '0005488':
#a.append(r)
def calculatePermuteStats(original_element_z_score_data):
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
z = abs(permuted_z_scores[element][0])
permute_scores = permuted_z_scores[element][1:] ###Exclude the true value
nullz = zsd.NullZ()
if abs(
nullz) == z: ###Only add the nullz values if they can count towards the p-value (if equal to the original z)
null_z_to_add = permutations - len(permute_scores)
permute_scores += [abs(
nullz)] * null_z_to_add ###Add null_z's in proportion to the amount of times there were not genes found for that element
if len(permute_scores) > 0:
p = permute_p(permute_scores, z)
else:
p = 1
#if p>1: p=1
zsd.SetP(p)
def FishersExactTest(r, n, R, N):
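### Builds the 2x2 contingency table [[changed & element, unchanged & element], [changed & not element, unchanged & not element]]
### and returns the two-tailed p-value, using scipy.stats.fisher_exact when available and the bundled fishers_exact_test module otherwise.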
a = r;
b = n - r;
c = R - r;
d = N - R - b
table = [[int(a), int(b)], [int(c), int(d)]]
try: ### SciPy version - cuts runtime by roughly one-third
oddsratio, pvalue = stats.fisher_exact(table)
return pvalue
except Exception:
ft = fishers_exact_test.FishersExactTest(table)
return ft.two_tail_p()
def adjustPermuteStats(original_element_z_score_data):
#1. Sort ascending the original input p-value vector. Call this spval. Keep the original indices so you can sort back.
#2. Define a new vector called tmp. tmp= spval. tmp will contain the BH p values.
#3. m is the length of tmp (also spval)
#4. i=m-1
#5 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1)) - second to last, last, last/second to last
#6. i=m-2
#7 tmp[ i ]=min(tmp[i+1], min((m/i)*spval[ i ],1))
#8 repeat step 7 for m-3, m-4,... until i=1
#9. sort tmp back to the original order of the input p values.
spval = []
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
p = float(zsd.PermuteP())
spval.append([p, element])
spval.sort();
tmp = spval;
m = len(spval);
i = m - 2;
x = 0 ###Step 1-4
while i > -1:
tmp[i] = min(tmp[i + 1][0], min((float(m) / (i + 1)) * spval[i][0], 1)), tmp[i][1];
i -= 1
for (adjp, element) in tmp:
zsd = original_element_z_score_data[element]
zsd.SetAdjP(adjp)
spval = []
def permute_p(null_list, true_value):
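### Empirical p-value: the fraction of permuted scores in null_list that are at least as large as the observed value, using the global permutation count as the denominator.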
y = 0;
z = 0;
x = permutations
for value in null_list:
if value >= true_value: y += 1
#if true_value > 8: global a; a = null_list; print true_value,y,x;kill
return (float(y) / float(x)) ###Multiply probability x2?
######### End Permutation Analysis #######
def exportZScoreData(original_element_z_score_data, element_type):
element_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-' + element_type + '-zscores.txt'
data = export.ExportFile(element_output)
headers = [element_type + '-Name', 'Number Changed', 'Number Measured', 'Percent Changed', 'Zscore', 'PermuteP',
'AdjP', 'Changed GeneSymbols']
headers = string.join(headers, '\t') + '\n'
data.write(headers);
sort_results = []
#print "Results for",len(original_element_z_score_data),"elements exported to",element_output
for element in original_element_z_score_data:
zsd = original_element_z_score_data[element]
try:
results = [zsd.Changed(), zsd.Measured(), zsd.PercentChanged(), zsd.ZScore(), zsd.PermuteP(), zsd.AdjP(),
zsd.AssociatedWithElement()]
except AttributeError:
print element, len(permuted_z_scores[element]);kill
results = [element] + results
results = string.join(results, '\t') + '\n'
sort_results.append([float(zsd.PermuteP()), -1 / float(zsd.Measured()), results])
sort_results.sort()
for values in sort_results:
results = values[2]
data.write(results)
data.close()
def getInputsForPermutationAnalysis(exon_db):
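### Returns (probeset-to-gene map, denominator probeset list) for the permutation analysis, restricted to probesets with a predicted alternative-splicing call when filter_for_AS is enabled.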
### Filter fold_dbase, which is the proper denominator
probeset_to_gene = {};
denominator_list = []
for probeset in exon_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
if proceed == 'yes':
gene = exon_db[probeset].GeneID()
probeset_to_gene[probeset] = gene
denominator_list.append(probeset)
return probeset_to_gene, denominator_list
def getJunctionSplicingAnnotations(regulated_exon_junction_db):
filter_status = 'yes'
########### Import critical exon annotation for junctions, build through the exon array analysis pipeline - link back to probesets
filtered_arrayids = {};
critical_probeset_annotation_db = {}
if array_type == 'RNASeq' and explicit_data_type == 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_exons.txt'
elif array_type == 'RNASeq' and explicit_data_type != 'null':
critical_exon_annotation_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
else:
critical_exon_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + species + "_Ensembl_" + array_type + "_probesets.txt"
critical_exon_annotation_file = filename = getFilteredFilename(critical_exon_annotation_file)
for uid in regulated_exon_junction_db:
gene = regulated_exon_junction_db[uid].GeneID()
critical_exons = regulated_exon_junction_db[uid].CriticalExons()
"""### It appears that each critical exon for junction arrays can be a concatenation of multiple exons, making this unnecessary
if len(critical_exons)>1 and array_type == 'junction':
critical_exons_joined = string.join(critical_exons,'|')
filtered_arrayids[gene+':'+critical_exon].append(uid)"""
for critical_exon in critical_exons:
try:
try:
filtered_arrayids[gene + ':' + critical_exon].append(uid)
except TypeError:
print gene, critical_exon, uid;kill
except KeyError:
filtered_arrayids[gene + ':' + critical_exon] = [uid]
critical_exon_annotation_db = importSplicingAnnotationDatabase(critical_exon_annotation_file, 'exon-fake',
filtered_arrayids, filter_status);
null = [] ###The file is in exon centric format, so designate array_type as exon
for key in critical_exon_annotation_db:
ced = critical_exon_annotation_db[key]
for junction_probesets in filtered_arrayids[key]:
try:
critical_probeset_annotation_db[junction_probesets].append(ced) ###use for splicing and Exon annotations
except KeyError:
critical_probeset_annotation_db[junction_probesets] = [ced]
for junction_probesets in critical_probeset_annotation_db:
if len(critical_probeset_annotation_db[
junction_probesets]) > 1: ###Thus multiple exons associated, must combine annotations
exon_ids = [];
external_exonids = [];
exon_regions = [];
splicing_events = []
for ed in critical_probeset_annotation_db[junction_probesets]:
ensembl_gene_id = ed.GeneID();
transcript_cluster_id = ed.ExternalGeneID()
exon_ids.append(ed.ExonID());
external_exonids.append(ed.ExternalExonIDs());
exon_regions.append(ed.ExonRegionID());
se = string.split(ed.SplicingEvent(), '|')
for i in se: splicing_events.append(i)
splicing_events = unique.unique(splicing_events) ###remove duplicate entries
exon_id = string.join(exon_ids, '|');
external_exonid = string.join(external_exonids, '|');
exon_region = string.join(exon_regions, '|');
splicing_event = string.join(splicing_events, '|')
probe_data = AffyExonSTData(ensembl_gene_id, exon_id, external_exonid, '', exon_region, splicing_event, '',
'')
if array_type != 'RNASeq': probe_data.setTranscriptCluster(transcript_cluster_id)
critical_probeset_annotation_db[junction_probesets] = probe_data
else:
critical_probeset_annotation_db[junction_probesets] = critical_probeset_annotation_db[junction_probesets][0]
return critical_probeset_annotation_db
def determineExternalType(external_probeset_db):
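### Infer the format of externally supplied results: a 'TC' header block indicates JETTA/MADS-style output
### (normalized-intensity fold plus MADS p-values); otherwise a generic layout is assumed and the two value
### columns are classified as score vs. p-value by whether their absolute values exceed 1.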
external_probeset_db2 = {}
if 'TC' in external_probeset_db:
temp_index = {};
i = 0;
type = 'JETTA'
for name in external_probeset_db['TC'][0]: temp_index[name] = i; i += 1  ### map column names to their indices
if 'PS:norm_expr_fold_change' in temp_index: NI_fold_index = temp_index['PS:norm_expr_fold_change']
if 'MADS:pv_1over2' in temp_index: MADS_p1_index = temp_index['MADS:pv_1over2']
if 'MADS:pv_2over1' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
if 'TC:expr_fold_change' in temp_index: MADS_p2_index = temp_index['MADS:pv_2over1']
if 'PsId' in temp_index: ps_index = temp_index['PsId']
for tc in external_probeset_db:
for list in external_probeset_db[tc]:
try:
NI_fold = float(list[NI_fold_index])
except Exception:
NI_fold = 1
try:
MADSp1 = float(list[MADS_p1_index])
except Exception:
MADSp1 = 1
try:
MADSp2 = float(list[MADS_p2_index])
except Exception:
MADSp2 = 1
if MADSp1 < MADSp2:
pval = MADSp1
else:
pval = MADSp2
probeset = list[ps_index]
external_probeset_db2[probeset] = NI_fold, pval
else:
type = 'generic'
a = [];
b = []
for id in external_probeset_db:
#print external_probeset_db[id]
try:
a.append(abs(float(external_probeset_db[id][0][0])))
except Exception:
null = []
try:
b.append(abs(float(external_probeset_db[id][0][1])))
except Exception:
null = []
a.sort();
b.sort();
pval_index = None;
score_index = None
if len(a) > 0:
if max(a) > 1:
score_index = 0
else:
pval_index = 0
if len(b) > 0:
if max(b) > 1:
score_index = 1
else:
pval_index = 1
for id in external_probeset_db:
if score_index != None:
score = external_probeset_db[id][0][score_index]
else:
score = 1
if pval_index != None:
pval = external_probeset_db[id][0][pval_index]
else:
pval = 1
external_probeset_db2[id] = score, pval
return external_probeset_db2, type
def importExternalProbesetData(dataset_dir):
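### Import externally generated probeset-level splicing scores/p-values and repackage them as ExonData objects
### so they can flow through the standard annotation and export steps; probesets without external results are
### recorded in excluded_probeset_db (used downstream by DomainGraph).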
excluded_probeset_db = {};
splice_event_list = [];
p_value_call = {};
permute_p_values = {};
gene_expression_diff_db = {}
analyzed_probeset_db = {}
external_probeset_db = importExternalDBList(dataset_dir)
external_probeset_db, ext_type = determineExternalType(external_probeset_db)
for probeset in exon_db: analyzed_probeset_db[probeset] = []
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in analyzed_probeset_db: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del analyzed_probeset_db[probeset]
### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del analyzed_probeset_db[probeset]
except KeyError:
null = []
for probeset in analyzed_probeset_db:
ed = exon_db[probeset];
geneid = ed.GeneID()
td = TranscriptionData('', '');
gene_expression_diff_db[geneid] = td
if probeset in external_probeset_db:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
splicing_index, normIntensityP = external_probeset_db[probeset]
group1_ratios = [];
group2_ratios = [];
exp_log_ratio = '';
ttest_exp_p = '';
normIntensityP = '';
opposite_SI_log_mean = ''
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, group1_ratios, group2_ratios,
normIntensityP, opposite_SI_log_mean)
splice_event_list.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(0, geneid, 'NA')
excluded_probeset_db[probeset] = eed
print len(splice_event_list), 'pre-filtered external results imported...\n'
return splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db
def splicingAnalysisAlgorithms(nonlog_NI_db, fold_dbase, dataset_name, gene_expression_diff_db, exon_db, ex_db, si_db,
dataset_dir):
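### High-level flow of this function: (1) collect splicing scores from the selected algorithm (external results,
### ASPIRE/linearregres, splicing-index or FIRMA), (2) annotate hits with protein domain and microRNA binding-site
### data, (3) optionally permute probeset sets to estimate domain/miRBS enrichment p-values, and (4) export the
### exon/junction-level and gene-level result files plus the DomainGraph/GO-Elite inputs.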
protein_exon_feature_db = {};
global regulated_exon_junction_db;
global critical_exon_annotation_db;
global probeset_comp_db;
probeset_comp_db = {}
if original_conditions == 2: print "Beginning to run", analysis_method, "algorithm on", dataset_name[0:-1], "data"
if run_from_scratch == 'Annotate External Results':
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db, gene_expression_diff_db = importExternalProbesetData(
dataset_dir)
elif analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
original_exon_db = exon_db
if original_conditions > 2:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = ex_db
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
else:
splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db = analyzeJunctionSplicing(
nonlog_NI_db)
splice_event_list, p_value_call, permute_p_values, exon_db, regulated_exon_junction_db = furtherProcessJunctionScores(
splice_event_list, probeset_comp_db, permute_p_values)
elif analysis_method == 'splicing-index':
regulated_exon_junction_db = {}
if original_conditions > 2:
excluded_probeset_db = ex_db;
splice_event_list = si_db;
clearObjectsFromMemory(ex_db);
clearObjectsFromMemory(si_db)
ex_db = [];
si_db = [];
permute_p_values = {};
p_value_call = ''
else:
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = analyzeSplicingIndex(fold_dbase)
elif analysis_method == 'FIRMA':
regulated_exon_junction_db = {}
splice_event_list, p_value_call, permute_p_values, excluded_probeset_db = FIRMAanalysis(fold_dbase)
global permuted_z_scores;
permuted_z_scores = {};
global original_domain_z_score_data;
original_domain_z_score_data = {}
global original_microRNA_z_score_data;
original_microRNA_z_score_data = {}
nonlog_NI_db = [] ### Clear memory of this large dictionary
try:
clearObjectsFromMemory(original_avg_const_exp_db); clearObjectsFromMemory(array_raw_group_values)
except Exception:
null = []
try:
clearObjectsFromMemory(avg_const_exp_db)
except Exception:
null = []
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
try:
clearObjectsFromMemory(fold_dbase); fold_dbase = []
except Exception:
null = []
microRNA_full_exon_db, microRNA_count_db, gene_microRNA_denom = ExonAnalyze_module.importmicroRNADataExon(species,
array_type,
exon_db,
microRNA_prediction_method,
explicit_data_type,
root_dir)
#print "MicroRNA data imported"
if use_direct_domain_alignments_only == 'yes':
protein_ft_db_len, domain_associated_genes = importProbesetAligningDomains(exon_db, 'gene')
else:
protein_ft_db_len, domain_associated_genes = importProbesetProteinCompDomains(exon_db, 'gene', 'exoncomp')
if perform_element_permutation_analysis == 'yes':
probeset_to_gene, denominator_list = getInputsForPermutationAnalysis(exon_db)
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
exon_gene_array_translation_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_' + array_type + '-exon_probesets.txt'
try:
exon_array_translation_db = importGeneric(exon_gene_array_translation_file)
except Exception:
exon_array_translation_db = {} ### Not present for all species
exon_hits = {};
clearObjectsFromMemory(probeset_comp_db);
probeset_comp_db = []
###Run analyses in ExonAnalyze_module to assess functional changes
for (score, ed) in splice_event_list:
geneid = ed.GeneID()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
pl = string.split(ed.Probeset1(), '|');
probeset1 = pl[0] ### When agglomerated, this is important
uid = (probeset1, ed.Probeset2())
else:
uid = ed.Probeset1()
gene_exon = geneid, uid;
exon_hits[gene_exon] = ed
#print probeset1,ed.Probeset1(),ed.Probeset2(),gene_exon,ed.CriticalExons()
dataset_name_original = analysis_method + '-' + dataset_name[8:-1]
global functional_attribute_db;
global protein_features
### Possibly Block-out code for DomainGraph export
########### Re-import the exon_db for significant entries with full annotations
exon_db = {};
filtered_arrayids = {};
filter_status = 'yes' ###Use this as a means to save memory (import multiple times - only storing different types relevant information)
for (score, entry) in splice_event_list:
try:
probeset = original_exon_db[entry.Probeset1()].Probeset()
except Exception:
probeset = entry.Probeset1()
pl = string.split(probeset, '|');
probeset = pl[0];
filtered_arrayids[probeset] = [] ### When agglomerated, this is important
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
try:
probeset = entry.Probeset2(); filtered_arrayids[probeset] = []
except AttributeError:
null = [] ###occurs when running Splicing
exon_db = importSplicingAnnotationDatabase(probeset_annotations_file, array_type, filtered_arrayids, filter_status);
null = [] ###replace existing exon_db (probeset_annotations_file should be a global)
###domain_gene_changed_count_db is the number of genes for each domain that are found for regulated probesets
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
regulated_exon_junction_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
regulated_exon_junction_db, 'probeset', 'exoncomp')
else:
if use_direct_domain_alignments_only == 'yes':
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetAligningDomains(
exon_db, 'probeset')
else:
protein_features, domain_gene_changed_count_db, functional_attribute_db = importProbesetProteinCompDomains(
exon_db, 'probeset', 'exoncomp')
filtered_microRNA_exon_db = ExonAnalyze_module.filterMicroRNAProbesetAssociations(microRNA_full_exon_db, exon_hits)
microRNA_full_exon_db = []
###add microRNA data to functional_attribute_db
microRNA_hit_gene_count_db = {};
all_microRNA_gene_hits = {};
microRNA_attribute_db = {};
probeset_mirBS_db = {}
for (affygene,
uid) in filtered_microRNA_exon_db: ###example ('G7091354', 'E20|') [('hsa-miR-130a', 'Pbxip1'), ('hsa-miR-130a', 'Pbxip1'
###3-1-08
miR_list = []
microRNA_symbol_list = filtered_microRNA_exon_db[(affygene, uid)]
for mir_key in microRNA_symbol_list:
microRNA, gene_symbol, miR_seq, miR_sources = mir_key
#if 'ENS' in microRNA: print microRNA; kill ### bug in some miRNA annotations introduced in the build process
specific_microRNA_tuple = (microRNA, '~')
try:
microRNA_hit_gene_count_db[microRNA].append(affygene)
except KeyError:
microRNA_hit_gene_count_db[microRNA] = [affygene]
###Create a database with the same structure as "protein_exon_feature_db"(below) for over-representation analysis (direction specific), after linking up splice direction data
try:
microRNA_attribute_db[(affygene, uid)].append(specific_microRNA_tuple)
except KeyError:
microRNA_attribute_db[(affygene, uid)] = [specific_microRNA_tuple]
miR_data = microRNA + ':' + miR_sources
miR_list.append(miR_data) ###Add miR information to the record
function_type = ('miR-sequence: ' + '(' + miR_data + ')' + miR_seq,
'~') ###Add miR sequence information to the sequence field of the report
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
#print (affygene,uid), [function_type];kill
if perform_element_permutation_analysis == 'yes':
try:
probeset_mirBS_db[uid].append(microRNA)
except KeyError:
probeset_mirBS_db[uid] = [microRNA]
miR_str = string.join(miR_list, ',');
miR_str = '(' + miR_str + ')'
function_type = ('microRNA-target' + miR_str, '~')
try:
functional_attribute_db[(affygene, uid)].append(function_type)
except KeyError:
functional_attribute_db[(affygene, uid)] = [function_type]
all_microRNA_gene_hits[affygene] = []
###Replace the gene list for each microRNA hit with count data
microRNA_hit_gene_count_db = eliminate_redundant_dict_values(microRNA_hit_gene_count_db)
###Combine any additional feature alignment info identified from 'ExonAnalyze_module.characterizeProteinLevelExonChanges' (e.g. from Ensembl or junction-based queries rather than exon-specific)
###with this database of (Gene,Exon)=[(functional element 1,'~'),(functional element 2,'~')] for downstream result file annotations
domain_hit_gene_count_db = {};
all_domain_gene_hits = {};
probeset_domain_db = {}
for entry in protein_features:
gene, uid = entry
for data_tuple in protein_features[entry]:
domain, call = data_tuple
try:
protein_exon_feature_db[entry].append(data_tuple)
except KeyError:
protein_exon_feature_db[entry] = [data_tuple]
try:
domain_hit_gene_count_db[domain].append(gene)
except KeyError:
domain_hit_gene_count_db[domain] = [gene]
all_domain_gene_hits[gene] = []
if perform_element_permutation_analysis == 'yes':
try:
probeset_domain_db[uid].append(domain)
except KeyError:
probeset_domain_db[uid] = [domain]
protein_features = [];
domain_gene_changed_count_db = []
###Replace the gene list for each domain hit with count data
domain_hit_gene_count_db = eliminate_redundant_dict_values(domain_hit_gene_count_db)
############ Perform Element Over-Representation Analysis ############
"""Domain/FT Fishers-Exact test: with "protein_exon_feature_db" (transformed to "domain_hit_gene_count_db") we can analyze over-representation of domain/features WITHOUT taking into account exon-inclusion or exclusion
Do this using: "domain_associated_genes", which contains domain tuple ('Tyr_pkinase', 'IPR001245') as a key and count in unique genes as the value in addition to
Number of genes linked to splice events "regulated" (SI and Midas p<0.05), number of genes with constitutive probesets
MicroRNA Fishers-Exact test: "filtered_microRNA_exon_db" contains gene/exon to microRNA data. For each microRNA, count the representation in spliced genes microRNA (unique gene count - make this from the mentioned file)
Do this using: "microRNA_count_db"""
domain_gene_counts = {} ### Get unique gene counts for each domain
for domain in domain_associated_genes:
domain_gene_counts[domain] = len(domain_associated_genes[domain])
total_microRNA_gene_hit_count = len(all_microRNA_gene_hits)
total_microRNA_gene_denom_count = len(gene_microRNA_denom)
Nm, Rm = calculateZScores(microRNA_hit_gene_count_db, microRNA_count_db, total_microRNA_gene_denom_count,
total_microRNA_gene_hit_count, 'microRNA')
gene_microRNA_denom = []
summary_data_db['miRNA_gene_denom'] = total_microRNA_gene_denom_count
summary_data_db['miRNA_gene_hits'] = total_microRNA_gene_hit_count
summary_data_db['alt_events'] = len(splice_event_list)
total_domain_gene_hit_count = len(all_domain_gene_hits)
total_domain_gene_denom_count = protein_ft_db_len ###genes connected to domain annotations
Nd, Rd = calculateZScores(domain_hit_gene_count_db, domain_gene_counts, total_domain_gene_denom_count,
total_domain_gene_hit_count, 'domain')
microRNA_hit_gene_counts = {};
gene_to_miR_db = {} ### Get unique gene counts for each miR and the converse
for microRNA in microRNA_hit_gene_count_db:
microRNA_hit_gene_counts[microRNA] = len(microRNA_hit_gene_count_db[microRNA])
for gene in microRNA_hit_gene_count_db[microRNA]:
try:
gene_to_miR_db[gene].append(microRNA)
except KeyError:
gene_to_miR_db[gene] = [microRNA]
gene_to_miR_db = eliminate_redundant_dict_values(gene_to_miR_db)
if perform_element_permutation_analysis == 'yes':
###Begin Domain/microRNA Permute Analysis
input_count = len(
splice_event_list) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(permutations / 20);
increment = original_increment
start_time = time.time();
print 'Permuting the Domain/miRBS analysis %d times' % permutations
x = 0;
permute_domain_inputs = [];
permute_miR_inputs = []
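### Each permutation samples input_count probesets from denominator_list and counts, per domain and per miRNA
### binding site, how many unique genes those probesets map to; these null gene counts feed the permuted z-score
### and p-value calculations below.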
while x < permutations:
if x == increment: increment += original_increment; print '*',
permute_input_list = random.sample(denominator_list, input_count);
x += 1
permute_domain_input_gene_counts = countGenesForElement(permute_input_list, probeset_to_gene,
probeset_domain_db)
permute_domain_inputs.append(permute_domain_input_gene_counts)
permute_miR_input_gene_counts = countGenesForElement(permute_input_list, probeset_to_gene,
probeset_mirBS_db)
permute_miR_inputs.append(permute_miR_input_gene_counts)
calculatePermuteZScores(permute_domain_inputs, domain_gene_counts, Nd, Rd)
calculatePermuteZScores(permute_miR_inputs, microRNA_hit_gene_counts, Nm, Rm)
calculatePermuteStats(original_domain_z_score_data)
calculatePermuteStats(original_microRNA_z_score_data)
adjustPermuteStats(original_domain_z_score_data)
adjustPermuteStats(original_microRNA_z_score_data)
exportZScoreData(original_domain_z_score_data, 'ft-domain')
exportZScoreData(original_microRNA_z_score_data, 'microRNA')
end_time = time.time();
time_diff = int(end_time - start_time)
print "Enrichment p-values for Domains/miRBS calculated in %d seconds" % time_diff
denominator_list = []
try:
clearObjectsFromMemory(original_microRNA_z_score_data)
except Exception:
null = []
microRNA_hit_gene_count_db = {};
microRNA_hit_gene_counts = {};
clearObjectsFromMemory(permuted_z_scores);
permuted_z_scores = [];
original_domain_z_score_data = []
if (array_type == 'AltMouse' or ((
array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null')) and analysis_method != 'splicing-index':
critical_probeset_annotation_db = getJunctionSplicingAnnotations(regulated_exon_junction_db)
probeset_aligning_db = importProbesetAligningDomains(regulated_exon_junction_db, 'perfect_match')
else:
probeset_aligning_db = importProbesetAligningDomains(exon_db, 'perfect_match')
############ Export exon/junction level results ############
splice_event_db = {};
protein_length_list = [];
aspire_gene_results = {}
critical_gene_exons = {};
unique_exon_event_db = {};
comparison_count = {};
direct_domain_gene_alignments = {}
functional_attribute_db2 = {};
protein_exon_feature_db2 = {};
microRNA_exon_feature_db2 = {}
external_exon_annot = {};
gene_exon_region = {};
gene_smallest_p = {};
gene_splice_event_score = {};
alternatively_reg_tc = {}
aspire_output = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-results.txt'
data = export.ExportFile(aspire_output)
goelite_output = root_dir + 'GO-Elite/AltExon/AS.' + dataset_name + analysis_method + '.txt'
goelite_data = export.ExportFile(goelite_output);
gcn = 0
#print 'LENGTH OF THE GENE ANNOTATION DATABASE',len(annotate_db)
if array_type != 'AltMouse':
DG_output = root_dir + 'AltResults/DomainGraph/' + dataset_name + analysis_method + '-DomainGraph.txt'
DG_data = export.ExportFile(DG_output)
### Write out only the inclusion hits to a subdir
SRFinder_inclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-inclusion.txt'
SRFinder_in_data = export.ExportFile(SRFinder_inclusion)
SRFinder_in_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the exclusion hits to a subdir
SRFinder_exclusion = root_dir + 'GO-Elite/exon/' + dataset_name + analysis_method + '-exclusion.txt'
SRFinder_ex_data = export.ExportFile(SRFinder_exclusion)
SRFinder_ex_data.write('probeset\tSystemCode\tdeltaI\tp-value\n')
### Write out only the denominator set to a subdir
SRFinder_denom = root_dir + 'GO-Elite/exon_denominator/' + species + '-' + array_type + '.txt'
SRFinder_denom_data = export.ExportFile(SRFinder_denom)
SRFinder_denom_data.write('probeset\tSystemCode\n')
ens_version = unique.getCurrentGeneDatabaseVersion()
ProcessedSpliceData_output = string.replace(DG_output, 'DomainGraph',
'ProcessedSpliceData') ### This is the same as the DG export but without converting the probeset IDs for non-exon arrays
ProcessedSpliceData_data = export.ExportFile(ProcessedSpliceData_output)
if ens_version == '':
try:
elite_db_versions = UI.returnDirectoriesNoReplace('/AltDatabase')
if len(elite_db_versions) > 0: ens_version = elite_db_versions[0]
except Exception:
null = []
ens_version = string.replace(ens_version, 'EnsMart', 'ENS_')
DG_data.write(ens_version + "\n")
DG_data.write("Probeset\tGeneID\tRegulation call\tSI\tSI p-value\tMiDAS p-value\n")
ProcessedSpliceData_data.write(
"ExonID(s)\tGeneID\tRegulation call\t" + analysis_method + "\t" + analysis_method + " p-value\tMiDAS p-value\n")
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if perform_permutation_analysis == 'yes':
p_value_type = 'permutation-values'
else:
p_value_type = 'FDR-' + p_value_call
if array_type == 'AltMouse': gene_name = 'AffyGene'; extra_transcript_annotation = 'block_structure'; extra_exon_annotation = 'splice_event_description'
if array_type == 'junction' or array_type == 'RNASeq':
gene_name = 'Ensembl';
extra_transcript_annotation = 'transcript cluster ID';
extra_exon_annotation = 'distal exon-region-ID'
goelite_data.write("GeneID\tSystemCode\tscore\tp-value\tSymbol\tExonIDs\n")
if array_type == 'RNASeq':
id1 = 'junctionID-1';
id2 = 'junctionID-2';
loc_column = 'exon/junction locations'
extra_transcript_annotation = 'Known/Novel Feature'
else:
id1 = 'probeset1'; id2 = 'probeset2'; loc_column = 'probeset locations'
title = [gene_name, analysis_method, 'symbol', 'description', 'exons1', 'exons2', 'regulation_call',
'event_call', id1, 'norm-p1', id2, 'norm-p2', 'fold1', 'fold2']
title += ['adj-fold1', 'adj-fold2', extra_transcript_annotation, 'critical_up_exons', 'critical_down_exons',
'functional_prediction', 'uniprot-ens_feature_predictions']
title += ['peptide_predictions', 'exp1', 'exp2', 'ens_overlapping_domains', 'constitutive_baseline_exp',
p_value_call, p_value_type, 'permutation-false-positives']
title += ['gene-expression-change', extra_exon_annotation, 'ExternalExonIDs', 'ExonRegionID', 'SplicingEvent',
'ExonAnnotationScore', 'large_splicing_diff', loc_column]
else:
goelite_data.write("GeneID\tSystemCode\tSI\tSI p-value\tMiDAS p-value\tSymbol\tExonID\n")
if analysis_method == 'splicing-index':
NIpval = 'SI_rawp';
splicing_score = 'Splicing-Index';
lowestp = 'lowest_p (MIDAS or SI)';
AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'SI_adjp'
else:
NIpval = 'FIRMA_rawp';
splicing_score = 'FIRMA_fold';
lowestp = 'lowest_p (MIDAS or FIRMA)';
AdjPcolumn = 'Deviation-Value'; #AdjPcolumn = 'FIRMA_adjp'
if array_type == 'RNASeq':
id1 = 'junctionID';
pval_column = 'junction p-value';
loc_column = 'junction location'
else:
id1 = 'probeset'; pval_column = 'probeset p-value'; loc_column = 'probeset location'
if array_type == 'RNASeq':
secondary_ID_title = 'Known/Novel Feature'
else:
secondary_ID_title = 'alternative gene ID'
title = ['Ensembl', splicing_score, 'symbol', 'description', 'exons', 'regulation_call', id1, pval_column,
lowestp, 'midas p-value', 'fold', 'adjfold']
title += ['up_exons', 'down_exons', 'functional_prediction', 'uniprot-ens_feature_predictions',
'peptide_predictions', 'ens_overlapping_domains', 'baseline_probeset_exp']
title += ['constitutive_baseline_exp', NIpval, AdjPcolumn, 'gene-expression-change']
title += [secondary_ID_title, 'ensembl exons', 'constitutive exon', 'exon-region-ID', 'exon annotations',
'distal exon-region-ID', loc_column]
title = string.join(title, '\t') + '\n'
try:
if original_conditions > 2: title = string.replace(title, 'regulation_call', 'conditions_compared')
except Exception:
null = []
data.write(title)
### Calculate adjusted normalized intensity p-values
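### FDRStats wraps each raw normalized-intensity p-value so that adjustPermuteStats (BH adjustment) can be reused
### here; both significant and excluded probesets are included so the adjustment spans all tested features.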
fdr_exon_stats = {}
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
for (score, entry) in splice_event_list: ### These are all "significant entries"
fds = FDRStats(entry.TTestNormalizedRatios())
fdr_exon_stats[entry.Probeset1()] = fds
for probeset in excluded_probeset_db: ### These are all "non-significant entries"
fds = FDRStats(excluded_probeset_db[probeset].TTestNormalizedRatios())
fdr_exon_stats[probeset] = fds
try:
adjustPermuteStats(fdr_exon_stats)
except Exception:
null = []
### Calculate the score average and stdev for each gene to later derive a Deviation Value
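### The Deviation Value (DV) computed further below passes each exon's dI together with the per-gene mean and stdev
### of dI scores to deviation(), presumably yielding a z-score-like measure of how much an exon deviates within its gene.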
gene_deviation_db = {}
for (score, entry) in splice_event_list:
dI = entry.Score();
geneID = entry.GeneID()
try:
gene_deviation_db[geneID].append(dI)
except Exception:
gene_deviation_db[geneID] = [dI]
for i in excluded_probeset_db:
entry = excluded_probeset_db[i]
try:
dI = entry.Score(); geneID = entry.GeneID()
except Exception:
geneID = entry[1]; dI = entry[-1]
try:
gene_deviation_db[geneID].append(dI)
except Exception:
None ### Don't include genes with no hits
for geneID in gene_deviation_db:
try:
avg_dI = statistics.avg(gene_deviation_db[geneID])
stdev_dI = statistics.stdev(gene_deviation_db[geneID])
gene_deviation_db[geneID] = avg_dI, stdev_dI
except Exception:
gene_deviation_db[geneID] = 'NA', 'NA'
event_count = 0
for (score, entry) in splice_event_list:
event_count += 1
dI = entry.Score();
probeset1 = entry.Probeset1();
regulation_call = entry.RegulationCall();
event_call = entry.EventCall();
critical_exon_list = entry.CriticalExonTuple()
probeset1_display = probeset1;
selected_probeset = probeset1
if agglomerate_inclusion_probesets == 'yes':
if array_type == 'AltMouse':
exons1 = original_exon_db[probeset1].ExonID()
try:
probeset1 = original_exon_db[probeset1].Probeset()
except Exception:
null = []
else:
probeset1 = probeset1;
exons1 = original_exon_db[probeset1].ExonID()
try:
selected_probeset = original_exon_db[probeset1].Probeset()
except Exception:
selected_probeset = probeset1
else:
try:
exons1 = exon_db[probeset1].ExonID()
except Exception:
print probeset1, len(exon_db)
for i in exon_db: print i; break
kill
critical_probeset_list = [selected_probeset]
affygene = entry.GeneID()
### Calculate deviation value for each exon
avg_dI, stdev_dI = gene_deviation_db[affygene]
try:
DV = deviation(dI, avg_dI,
stdev_dI) ### Note: the dI values are always in log2 space, independent of platform
except Exception:
DV = 'NA'
if affygene in annotate_db:
description = annotate_db[affygene].Description(); symbol = annotate_db[affygene].Symbol()
else:
description = ''; symbol = ''
ped1 = entry.ProbesetExprData1();
adjfold1 = ped1.AdjFold();
exp1 = ped1.BaselineExp();
fold1 = ped1.FoldChange();
rawp1 = ped1.ExpPval()
### Get Constitutive expression values
baseline_const_exp = entry.ConstitutiveExpression() ### For multiple group comparisons
#if affygene in gene_expression_diff_db: mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
mean_fold_change = str(
entry.ConstitutiveFold()) ### For multi-condition analyses, the gene expression is dependent on the conditions compared
except Exception:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
probeset2 = entry.Probeset2();
exons2 = exon_db[probeset2].ExonID();
rawp1 = str(entry.TTestNormalizedRatios());
rawp2 = str(entry.TTestNormalizedRatios2());
critical_probeset_list.append(probeset2)
ped2 = entry.ProbesetExprData2();
adjfold2 = ped2.AdjFold();
exp2 = ped2.BaselineExp();
fold2 = ped2.FoldChange()
try:
location_summary = original_exon_db[selected_probeset].LocationSummary() + '|' + original_exon_db[
probeset2].LocationSummary()
except Exception:
try:
location_summary = exon_db[selected_probeset].LocationSummary() + '|' + exon_db[
probeset2].LocationSummary()
except Exception:
location_summary = ''
if array_type == 'AltMouse':
extra_transcript_annotation = exon_db[probeset1].GeneStructure()
else:
try:
extra_exon_annotation = last_exon_region_db[affygene]
except KeyError:
extra_exon_annotation = ''
try:
tc1 = original_exon_db[probeset1].SecondaryGeneID()
tc2 = original_exon_db[probeset2].SecondaryGeneID() ### Transcript Cluster
probeset_tc = makeUnique([tc1, tc2])
extra_transcript_annotation = string.join(probeset_tc, '|')
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
except Exception:
extra_transcript_annotation = ''
if array_type == 'RNASeq':
try:
extra_transcript_annotation = entry.NovelEvent() ### Instead of secondary gene ID, list known vs. novel reciprocal junction annotation
except Exception:
None
exp_list = [float(exp1), float(exp2), float(exp1) + float(fold1), float(exp2) + float(fold2)];
exp_list.sort();
exp_list.reverse()
probeset_tuple = (probeset1, probeset2)
else:
try:
exp_list = [float(exp1), float(exp1) + float(fold1)]; exp_list.sort(); exp_list.reverse()
except Exception:
exp_list = ['']
probeset_tuple = (probeset1)
highest_exp = exp_list[0]
###Use permuted p-value or lowest expression junction p-value based on the situation
###This p-value is used to filter out aspire events for further analyses
if len(p_value_call) > 0:
if probeset_tuple in permute_p_values:
lowest_raw_p, pos_permute, total_permute, false_pos = permute_p_values[probeset_tuple]
else:
lowest_raw_p = "NA"; pos_permute = "NA"; total_permute = "NA"; false_pos = "NA"
else:
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
raw_p_list = [entry.TTestNormalizedRatios(),
entry.TTestNormalizedRatios2()] #raw_p_list = [float(rawp1),float(rawp2)]; raw_p_list.sort()
else:
try:
raw_p_list = [
float(entry.TTestNormalizedRatios())] ###Could also be rawp1, but this is more appropriate
except Exception:
raw_p_list = [1] ### Occurs when p='NA'
raw_p_list.sort()
lowest_raw_p = raw_p_list[0];
pos_permute = "NA";
total_permute = "NA";
false_pos = "NA"
if perform_permutation_analysis == 'yes':
p_value_extra = str(pos_permute) + ' out of ' + str(total_permute)
else:
p_value_extra = str(pos_permute)
up_exons = '';
down_exons = '';
up_exon_list = [];
down_exon_list = [];
gene_exon_list = []
exon_data = critical_exon_list
variable = exon_data[0]
if variable == 1 and regulation_call == 'upregulated':
for exon in exon_data[1]:
up_exons = up_exons + exon + ',';
up_exon_list.append(exon)
key = affygene, exon + '|';
gene_exon_list.append(key)
elif variable == 1 and regulation_call == 'downregulated':
for exon in exon_data[1]:
down_exons = down_exons + exon + ',';
down_exon_list.append(exon)
key = affygene, exon + '|';
gene_exon_list.append(key)
else:
try:
exon1 = exon_data[1][0]; exon2 = exon_data[1][1]
except Exception:
print exon_data;kill
if adjfold1 > 0:
up_exons = up_exons + exon1 + ',';
down_exons = down_exons + exon2 + ','
up_exon_list.append(exon1);
down_exon_list.append(exon2)
key = affygene, exon1 + '|';
gene_exon_list.append(key);
key = affygene, exon2 + '|';
gene_exon_list.append(key)
else:
up_exons = up_exons + exon2 + ',';
down_exons = down_exons + exon1 + ','
up_exon_list.append(exon2);
down_exon_list.append(exon1)
key = affygene, exon1 + '|';
gene_exon_list.append(key);
key = affygene, exon2 + '|';
gene_exon_list.append(key)
up_exons = up_exons[0:-1];
down_exons = down_exons[0:-1]
try: ### Get comparisons group annotation data for multigroup comparison analyses
if original_conditions > 2:
try:
regulation_call = ped1.Annotation()
except Exception:
null = []
except Exception:
null = []
###Format functional results based on exon level fold change
null = []
#global a; a = exon_hits; global b; b=microRNA_attribute_db; kill
"""if 'G7100684@J934332_RC@j_at' in critical_probeset_list:
print probeset1, probeset2, gene, critical_probeset_list, 'blah'
if ('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at')) in functional_attribute_db:
print functional_attribute_db[('G7100684', ('G7100684@J934333_RC@j_at', 'G7100684@J934332_RC@j_at'))];blah
blah"""
new_functional_attribute_str, functional_attribute_list2, seq_attribute_str, protein_length_list = format_exon_functional_attributes(
affygene, critical_probeset_list, functional_attribute_db, up_exon_list, down_exon_list,
protein_length_list)
new_uniprot_exon_feature_str, uniprot_exon_feature_list, null, null = format_exon_functional_attributes(
affygene, critical_probeset_list, protein_exon_feature_db, up_exon_list, down_exon_list, null)
null, microRNA_exon_feature_list, null, null = format_exon_functional_attributes(affygene,
critical_probeset_list,
microRNA_attribute_db,
up_exon_list, down_exon_list,
null)
if len(new_functional_attribute_str) == 0: new_functional_attribute_str = ' '
if len(new_uniprot_exon_feature_str) == 0: new_uniprot_exon_feature_str = ' '
if len(
seq_attribute_str) > 12000: seq_attribute_str = 'The sequence is too long to report for spreadsheet analysis'
### Add entries to a database to quantify the number of reciprocal isoforms regulated
reciprocal_isoform_data = [len(critical_exon_list[1]), critical_exon_list[1], event_call, regulation_call]
try:
float((lowest_raw_p))
except ValueError:
lowest_raw_p = 0
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
unique_exon_event_db[affygene].append(reciprocal_isoform_data)
except KeyError:
unique_exon_event_db[affygene] = [reciprocal_isoform_data]
### Add functional attribute information to a new database
for item in uniprot_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
protein_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
protein_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
"""Database not used for exon/junction data export but for over-representation analysis (direction specific)"""
for item in microRNA_exon_feature_list:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
microRNA_exon_feature_db2[affygene, attribute].append(exon)
except KeyError:
microRNA_exon_feature_db2[affygene, attribute] = [exon]
### Add functional attribute information to a new database
for item in functional_attribute_list2:
attribute = item[0]
exon = item[1]
if (float((lowest_raw_p)) <= p_threshold or false_pos < 2) or lowest_raw_p == 1 or lowest_raw_p == 'NA':
try:
functional_attribute_db2[affygene, attribute].append(exon)
except KeyError:
functional_attribute_db2[affygene, attribute] = [exon]
try:
abs_fold = abs(float(mean_fold_change));
fold_direction = 'down';
fold1_direction = 'down';
fold2_direction = 'down'
large_splicing_diff1 = 0;
large_splicing_diff2 = 0;
large_splicing_diff = 'null';
opposite_splicing_pattern = 'no'
if float(mean_fold_change) > 0: fold_direction = 'up'
if float(fold1) > 0: fold1_direction = 'up'
if fold1_direction != fold_direction:
if float(fold1) > float(mean_fold_change): large_splicing_diff1 = float(fold1) - float(mean_fold_change)
except Exception:
fold_direction = '';
large_splicing_diff = '';
opposite_splicing_pattern = ''
if analysis_method != 'ASPIRE' and 'linearregres' not in analysis_method:
ed = exon_db[probeset1]
else:
try:
ed = critical_probeset_annotation_db[selected_probeset, probeset2]
except KeyError:
try:
ed = exon_db[selected_probeset] ###not useful data here, but the objects need to exist
except IOError:
ed = original_exon_db[probeset1]
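### exon_annot_score summarizes annotation support for the splicing event: 0 = no annotation, 2 = annotated but not
### matching the UCSC or custom lists below, 3 = custom annotations only, 4 = UCSC annotations only, 5 = both;
### in the exon-level branch further below, a score of 1 is assigned when multiple transcript clusters map to the gene.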
ucsc_splice_annotations = ["retainedIntron", "cassetteExon", "strangeSplice", "altFivePrime", "altThreePrime",
"altPromoter", "bleedingExon"]
custom_annotations = ["alt-3'", "alt-5'", "alt-C-term", "alt-N-term", "cassette-exon", "cassette-exon",
"exon-region-exclusion", "intron-retention", "mutually-exclusive-exon", "trans-splicing"]
custom_exon_annotations_found = 'no';
ucsc_annotations_found = 'no';
exon_annot_score = 0
if len(ed.SplicingEvent()) > 0:
for annotation in ucsc_splice_annotations:
if annotation in ed.SplicingEvent(): ucsc_annotations_found = 'yes'
for annotation in custom_annotations:
if annotation in ed.SplicingEvent(): custom_exon_annotations_found = 'yes'
if custom_exon_annotations_found == 'yes' and ucsc_annotations_found == 'no':
exon_annot_score = 3
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'no':
exon_annot_score = 4
elif ucsc_annotations_found == 'yes' and custom_exon_annotations_found == 'yes':
exon_annot_score = 5
else:
exon_annot_score = 2
try:
gene_splice_event_score[affygene].append(exon_annot_score) ###store for gene level results
except KeyError:
gene_splice_event_score[affygene] = [exon_annot_score]
try:
gene_exon_region[affygene].append(ed.ExonRegionID()) ###store for gene level results
except KeyError:
gene_exon_region[affygene] = [ed.ExonRegionID()]
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
if float(fold2) > 0: fold2_direction = 'up'
if fold2_direction != fold_direction:
if float(fold2) > float(mean_fold_change):
large_splicing_diff2 = float(fold2) - float(mean_fold_change)
if abs(large_splicing_diff2) > large_splicing_diff1:
large_splicing_diff = str(large_splicing_diff2)
else:
large_splicing_diff = str(large_splicing_diff1)
if fold1_direction != fold2_direction and abs(float(fold1)) > 0.4 and abs(float(fold2)) > 0.4 and abs(
float(mean_fold_change)) < max([float(fold2), float(fold1)]):
opposite_splicing_pattern = 'yes'
### Annotate splicing events based on exon structure data
if array_type == 'AltMouse':
extra_exon_annotation = ExonAnnotate_module.annotate_splice_event(exons1, exons2,
extra_transcript_annotation)
try:
splice_event_db[extra_exon_annotation] += 1
except KeyError:
splice_event_db[extra_exon_annotation] = 1
try:
direct_domain_alignments = probeset_aligning_db[selected_probeset, probeset2]
try:
direct_domain_gene_alignments[affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
splicing_event = ed.SplicingEvent()
if array_type == 'RNASeq':
splicing_event = checkForTransSplicing(probeset1_display, splicing_event)
splicing_event = checkForTransSplicing(probeset2, splicing_event)
exp1 = covertLogExpressionToNonLog(exp1)
exp2 = covertLogExpressionToNonLog(exp2)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
fold2 = covertLogFoldToNonLog(fold2)
adjfold1 = covertLogFoldToNonLog(adjfold1)
adjfold2 = covertLogFoldToNonLog(adjfold2)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
### Annotate splicing events based on pre-computed and existing annotations
values = [affygene, dI, symbol, fs(description), exons1, exons2, regulation_call, event_call,
probeset1_display, rawp1, probeset2, rawp2, fold1, fold2, adjfold1, adjfold2]
values += [extra_transcript_annotation, up_exons, down_exons, fs(new_functional_attribute_str),
fs(new_uniprot_exon_feature_str), fs(seq_attribute_str), exp1, exp2,
fs(direct_domain_alignments)]
values += [str(baseline_const_exp), str(lowest_raw_p), p_value_extra, str(false_pos), mean_fold_change,
extra_exon_annotation]
values += [ed.ExternalExonIDs(), ed.ExonRegionID(), splicing_event, str(exon_annot_score),
large_splicing_diff, location_summary]
exon_sets = abs(float(dI)), regulation_call, event_call, exons1, exons2, ''
### Export significant reciprocal junction pairs and scores
values_ps = [probeset1 + '|' + probeset2, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
values_ge = [affygene, 'En', dI, str(lowest_raw_p), symbol, probeset1_display + ' | ' + probeset2];
values_ge = string.join(values_ge, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
goelite_data.write(values_ge)
if array_type == 'junction' or array_type == 'RNASeq': ### Only applies to reciprocal junction sensitive platforms (but not currently AltMouse)
try:
exon_probeset = exon_array_translation_db[affygene + ':' + exon_data[1][0]][
0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None #probeset1 = affygene+':'+exon_data[1][0]
try:
null = int(probeset1) ### Must be an int to work in DomainGraph
values_dg = [probeset1, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_dg = string.join(values_dg, '\t') + '\n'
if array_type == 'junction' or array_type == 'RNASeq':
DG_data.write(values_dg)
values_srf = string.join([probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
else:
si_pvalue = lowest_raw_p
if si_pvalue == 1: si_pvalue = 'NA'
if probeset1 in midas_db:
midas_p = str(midas_db[probeset1])
if float(midas_p) < lowest_raw_p: lowest_raw_p = float(midas_p) ###This is the lowest and SI-pvalue
else:
midas_p = ''
###Determine what type of exon-annotations are present to assign a confidence score
if affygene in annotate_db: ###Determine the transcript clusters used to comprise a splice event (genes and exon specific)
try:
gene_tc = annotate_db[affygene].TranscriptClusterIDs()
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
for transcript_cluster in gene_tc: probeset_tc.append(transcript_cluster)
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''; gene_tc = ''
else:
try:
try:
probeset_tc = [ed.SecondaryGeneID()]
except Exception:
probeset_tc = [affygene]
probeset_tc = makeUnique(probeset_tc)
except Exception:
probeset_tc = ''; gene_tc = ''
cluster_number = len(probeset_tc)
try:
alternatively_reg_tc[affygene] += probeset_tc
except KeyError:
alternatively_reg_tc[affygene] = probeset_tc
try:
last_exon_region = last_exon_region_db[affygene]
except KeyError:
last_exon_region = ''
if cluster_number > 1: exon_annot_score = 1
direct_domain_alignments = ' '
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
try:
direct_domain_alignments = probeset_aligning_db[probeset1]
try:
direct_domain_gene_alignments[affygene] += ', ' + direct_domain_alignments
except KeyError:
direct_domain_gene_alignments[affygene] = direct_domain_alignments
except KeyError:
direct_domain_alignments = ' '
else:
try:
direct_domain_alignments = probeset_aligning_db[affygene + ':' + exons1]
except KeyError:
direct_domain_alignments = ''
if array_type == 'RNASeq':
exp1 = covertLogExpressionToNonLog(exp1)
baseline_const_exp = covertLogExpressionToNonLog(baseline_const_exp)
fold1 = covertLogFoldToNonLog(fold1)
adjfold1 = covertLogFoldToNonLog(adjfold1)
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
try:
adj_SIp = fdr_exon_stats[probeset1].AdjP()
except Exception:
adj_SIp = 'NA'
try:
secondary_geneid = ed.SecondaryGeneID()
except Exception:
secondary_geneid = affygene
if array_type == 'RNASeq':
secondary_geneid = ed.NovelExon()
### Write Splicing Index results
values = [affygene, dI, symbol, fs(description), exons1, regulation_call, probeset1, rawp1,
str(lowest_raw_p), midas_p, fold1, adjfold1]
values += [up_exons, down_exons, fs(new_functional_attribute_str), fs(new_uniprot_exon_feature_str),
fs(seq_attribute_str), fs(direct_domain_alignments), exp1]
values += [str(baseline_const_exp), str(si_pvalue), DV, mean_fold_change, secondary_geneid,
ed.ExternalExonIDs()]
values += [ed.Constitutive(), ed.ExonRegionID(), ed.SplicingEvent(), last_exon_region,
ed.LocationSummary()] #str(exon_annot_score)
if probeset1 in filtered_probeset_db: values += filtered_probeset_db[probeset1]
exon_sets = abs(float(dI)), regulation_call, event_call, exons1, exons1, midas_p
probeset = probeset1 ### store original ID (gets converted below)
### Write DomainGraph results
try:
midas_p = str(midas_db[probeset1])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
values_ps = [probeset1, affygene, 'changed', dI, 'NA', str(lowest_raw_p)];
values_ps = string.join(values_ps, '\t') + '\n'
try:
ProcessedSpliceData_data.write(values_ps)
except Exception:
None
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
if (array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null':
try:
exon_probeset = exon_array_translation_db[affygene + ':' + exon_data[1][0]][
0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None ### don't write out a line
else:
try:
exon_probeset = exon_array_translation_db[probeset1][0]; probeset1 = exon_probeset; gcn += 1
except Exception:
probeset1 = None; #null=[]; #print gcn, probeset1;kill - force an error - new in version 2.0.8
try:
null = int(probeset1)
values_dg = [probeset1, affygene, 'changed', dI, str(si_pvalue), midas_p];
values_dg = string.join(values_dg, '\t') + '\n'
DG_data.write(values_dg)
values_srf = string.join([probeset1, 'Ae', dI, str(lowest_raw_p)], '\t') + '\n'
if float(dI) > 0:
SRFinder_ex_data.write(values_srf)
elif float(dI) < 0:
SRFinder_in_data.write(values_srf)
except Exception:
null = []
values_ge = [affygene, 'En', dI, str(si_pvalue), midas_p, symbol, probeset];
values_ge = string.join(values_ge, '\t') + '\n'
goelite_data.write(values_ge)
if len(ed.SplicingEvent()) > 2:
try:
external_exon_annot[affygene].append(ed.SplicingEvent())
except KeyError:
external_exon_annot[affygene] = [ed.SplicingEvent()]
try:
values = string.join(values, '\t') + '\n'
except Exception:
print values;kill
data.write(values)
###Process data for gene level reports
if float((lowest_raw_p)) <= p_threshold or false_pos < 2 or lowest_raw_p == 1:
try:
comparison_count[affygene] += 1
except KeyError:
comparison_count[affygene] = 1
try:
aspire_gene_results[affygene].append(exon_sets)
except KeyError:
aspire_gene_results[affygene] = [exon_sets]
for exon in up_exon_list:
exon_info = exon, 'upregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
for exon in down_exon_list:
exon_info = exon, 'downregulated'
try:
critical_gene_exons[affygene].append(exon_info)
except KeyError:
critical_gene_exons[affygene] = [exon_info]
data.close()
print event_count, analysis_method, "results written to:", aspire_output, '\n'
try:
clearObjectsFromMemory(original_exon_db)
except Exception:
null = []
exon_array_translation_db = [];
original_exon_db = [];
probeset_to_gene = []
### Finish writing the DomainGraph export file with non-significant probesets
if array_type != 'AltMouse':
for probeset in excluded_probeset_db:
eed = excluded_probeset_db[probeset]
try:
midas_p = str(midas_db[probeset])
except KeyError:
midas_p = 'NA'
### Export significant exon/junction IDs and scores
try:
values_ps = [probeset, eed.GeneID(), 'UC', eed.Score(), str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
excl_probeset, geneid, score, rawp, pvalue = eed; values_ps = [probeset, geneid, 'UC', str(score),
str(rawp), str(pvalue)]
values_ps = string.join(values_ps, '\t') + '\n';
ProcessedSpliceData_data.write(values_ps)
### Write DomainGraph results
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
try:
exon_probeset = exon_array_translation_db[probeset][0]; probeset = exon_probeset; gcn += 1
except Exception:
probeset = None; # null=[] - force an error - new in version 2.0.8
try:
values_dg = [probeset, eed.GeneID(), 'UC', eed.Score(), str(eed.TTestNormalizedRatios()), midas_p]
except Exception:
try:
excl_probeset, geneid, score, rawp, pvalue = eed
if ':' in probeset: probeset = excl_probeset ### Example: ENSMUSG00000029213:E2.1, make this just the numeric exclusion probeset - Not sure if DG handles non-numeric
values_dg = [probeset, geneid, 'UC', str(score), str(rawp), str(pvalue)]
except Exception:
None
try:
null = int(probeset)
values_dg = string.join(values_dg, '\t') + '\n';
DG_data.write(values_dg)
except Exception:
null = []
if array_type == 'gene' or array_type == 'junction' or array_type == 'RNASeq':
for id in exon_array_translation_db:
SRFinder_denom_data.write(exon_array_translation_db[id] + '\tAe\n')
else:
for probeset in original_exon_db:
SRFinder_denom_data.write(probeset + '\tAe\n')
DG_data.close()
SRFinder_in_data.close()
SRFinder_ex_data.close()
SRFinder_denom_data.close()
for affygene in direct_domain_gene_alignments:
domains = string.split(direct_domain_gene_alignments[affygene], ', ')
domains = unique.unique(domains);
domains = string.join(domains, ', ')
direct_domain_gene_alignments[affygene] = domains
### functional_attribute_db2 will be reorganized below, so first preserve the original under functional_attribute_db
functional_attribute_db = functional_attribute_db2
functional_attribute_db2 = reorganize_attribute_entries(functional_attribute_db2, 'no')
external_exon_annot = eliminate_redundant_dict_values(external_exon_annot)
protein_exon_feature_db = protein_exon_feature_db2
protein_exon_feature_db2 = reorganize_attribute_entries(protein_exon_feature_db2, 'no')
############ Export Gene Data ############
up_splice_val_genes = 0;
down_dI_genes = 0;
diff_exp_spliced_genes = 0;
diff_spliced_rna_factor = 0
ddI = 0;
udI = 0
summary_data_db['direct_domain_genes'] = len(direct_domain_gene_alignments)
summary_data_db['alt_genes'] = len(aspire_gene_results)
critical_gene_exons = eliminate_redundant_dict_values(critical_gene_exons)
aspire_output_gene = root_dir + 'AltResults/AlternativeOutput/' + dataset_name + analysis_method + '-exon-inclusion-GENE-results.txt'
data = export.ExportFile(aspire_output_gene)
if array_type == 'AltMouse': goelite_data.write("GeneID\tSystemCode\n")
title = ['AffyGene', 'max_dI', 'midas-p (corresponding)', 'symbol', 'external gene ID', 'description',
'regulation_call', 'event_call']
title += ['number_of_comparisons', 'num_effected_exons', 'up_exons', 'down_exons', 'functional_attribute',
'uniprot-ens_exon_features', 'direct_domain_alignments']
title += ['pathways', 'mean_fold_change', 'exon-annotations', 'exon-region IDs', 'alternative gene ID',
'splice-annotation score']
title = string.join(title, '\t') + '\n'
data.write(title)
for affygene in aspire_gene_results:
if affygene in annotate_db:
description = annotate_db[affygene].Description()
symbol = annotate_db[affygene].Symbol()
ensembl = annotate_db[affygene].ExternalGeneID()
if array_type != 'AltMouse' and array_type != 'RNASeq':
transcript_clusters = alternatively_reg_tc[affygene]; transcript_clusters = makeUnique(
transcript_clusters); transcript_clusters = string.join(transcript_clusters, '|')
else:
transcript_clusters = affygene
rna_processing_factor = annotate_db[affygene].RNAProcessing()
else:
description = '';symbol = '';ensembl = affygene;rna_processing_factor = ''; transcript_clusters = ''
if ensembl in go_annotations:
wpgo = go_annotations[ensembl]; goa = wpgo.Combined()
else:
goa = ''
if array_type == 'AltMouse':
if len(ensembl) > 0: goelite_data.write(ensembl + '\tL\n')
try:
gene_splice_event_score[affygene].sort(); top_se_score = str(gene_splice_event_score[affygene][-1])
except KeyError:
top_se_score = 'NA'
try:
gene_regions = gene_exon_region[affygene]; gene_regions = makeUnique(
gene_regions); gene_regions = string.join(gene_regions, '|')
except KeyError:
gene_regions = 'NA'
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
number_of_comparisons = str(comparison_count[affygene])
else:
number_of_comparisons = 'NA'
results_list = aspire_gene_results[affygene]
results_list.sort();
results_list.reverse()
max_dI = str(results_list[0][0])
regulation_call = results_list[0][1]
event_call = results_list[0][2]
midas_p = results_list[0][-1]
num_critical_exons = str(len(critical_gene_exons[affygene]))
try:
direct_domain_annots = direct_domain_gene_alignments[affygene]
except KeyError:
direct_domain_annots = ' '
down_exons = '';
up_exons = '';
down_list = [];
up_list = []
for exon_info in critical_gene_exons[affygene]:
exon = exon_info[0];
call = exon_info[1]
if call == 'downregulated':
down_exons = down_exons + exon + ','
down_list.append(exon)
ddI += 1
if call == 'upregulated':
up_exons = up_exons + exon + ','
up_list.append(exon)
udI += 1
down_exons = down_exons[0:-1]
up_exons = up_exons[0:-1]
up_exons = add_a_space(up_exons);
down_exons = add_a_space(down_exons)
functional_annotation = ''
if affygene in functional_attribute_db2:
number_of_functional_attributes = str(len(functional_attribute_db2[affygene]))
attribute_list = functional_attribute_db2[affygene]
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
functional_annotation = functional_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
functional_annotation = functional_annotation + exons
else:
functional_annotation = functional_annotation + ','
functional_annotation = functional_annotation[0:-1]
uniprot_exon_annotation = ''
if affygene in protein_exon_feature_db2:
number_of_functional_attributes = str(len(protein_exon_feature_db2[affygene]))
attribute_list = protein_exon_feature_db2[affygene];
attribute_list.sort()
for attribute_exon_info in attribute_list:
exon_attribute = attribute_exon_info[0]
exon_list = attribute_exon_info[1]
uniprot_exon_annotation = uniprot_exon_annotation + exon_attribute
exons = '('
for exon in exon_list: exons = exons + exon + ','
exons = exons[0:-1] + '),'
if add_exons_to_annotations == 'yes':
uniprot_exon_annotation = uniprot_exon_annotation + exons
else:
uniprot_exon_annotation = uniprot_exon_annotation + ','
uniprot_exon_annotation = uniprot_exon_annotation[0:-1]
if len(uniprot_exon_annotation) == 0: uniprot_exon_annotation = ' '
if len(functional_annotation) == 0: functional_annotation = ' '
if affygene in gene_expression_diff_db:
mean_fold_change = gene_expression_diff_db[affygene].ConstitutiveFoldStr()
try:
if abs(float(mean_fold_change)) > log_fold_cutoff: diff_exp_spliced_genes += 1
except Exception:
diff_exp_spliced_genes = diff_exp_spliced_genes
else:
mean_fold_change = 'NC'
if len(rna_processing_factor) > 2: diff_spliced_rna_factor += 1
###Add annotations for where in the gene structure these exons are (according to Ensembl)
if affygene in external_exon_annot:
external_gene_annot = string.join(external_exon_annot[affygene], ', ')
else:
external_gene_annot = ''
if array_type == 'RNASeq':
mean_fold_change = covertLogFoldToNonLog(mean_fold_change)
values = [affygene, max_dI, midas_p, symbol, ensembl, fs(description), regulation_call, event_call,
number_of_comparisons]
values += [num_critical_exons, up_exons, down_exons, functional_annotation]
values += [fs(uniprot_exon_annotation), fs(direct_domain_annots), fs(goa), mean_fold_change,
external_gene_annot, gene_regions, transcript_clusters, top_se_score]
values = string.join(values, '\t') + '\n'
data.write(values)
### Use results for summary statistics
if len(up_list) > len(down_list):
up_splice_val_genes += 1
else:
down_dI_genes += 1
data.close()
print "Gene-level results written"
###yes here indicates that although the truncation events will initially be filtered out, later they will be added
###back in without the non-truncation annotations....if there is no second database (in this case functional_attribute_db again)
###IF WE WANT TO FILTER OUT NON-NMD ENTRIES WHEN NMD IS PRESENT (FOR A GENE) MUST INCLUDE functional_attribute_db AS THE SECOND VARIABLE!!!!
###Currently, yes does nothing
functional_annotation_db, null = grab_summary_dataset_annotations(functional_attribute_db, '', 'yes')
upregulated_genes = 0;
downregulated_genes = 0
###Calculate the number of upregulated and downregulated genes
for affygene in gene_expression_diff_db:
fold_val = gene_expression_diff_db[affygene].ConstitutiveFold()
try:
if float(fold_val) > log_fold_cutoff:
upregulated_genes += 1
elif abs(float(fold_val)) > log_fold_cutoff:
downregulated_genes += 1
except Exception:
null = []
upregulated_rna_factor = 0;
downregulated_rna_factor = 0
###Calculate the total number of putative RNA-processing/binding factors differentially regulated
for affygene in gene_expression_diff_db:
gene_fold = gene_expression_diff_db[affygene].ConstitutiveFold()
rna_processing_factor = gene_expression_diff_db[affygene].RNAProcessing()
if len(rna_processing_factor) > 1:
if gene_fold > log_fold_cutoff:
upregulated_rna_factor += 1
elif abs(gene_fold) > log_fold_cutoff:
downregulated_rna_factor += 1
###Generate three files for downstream functional summary
### functional_annotation_db2 is output to the same function as functional_annotation_db, ranked_uniprot_list_all to get all ranked uniprot annotations,
### and ranked_uniprot_list_coding_only to get only coding ranked uniprot annotations
functional_annotation_db2, ranked_uniprot_list_all = grab_summary_dataset_annotations(protein_exon_feature_db, '',
'') #functional_attribute_db
null, ranked_uniprot_list_coding_only = grab_summary_dataset_annotations(protein_exon_feature_db,
functional_attribute_db,
'') #functional_attribute_db
functional_attribute_db = [];
protein_exon_feature_db = []
    ###Summarize changes in avg protein length for each splice event
up_protein_list = [];
down_protein_list = [];
protein_length_fold_diff = []
for [down_protein, up_protein] in protein_length_list:
up_protein = float(up_protein);
down_protein = float(down_protein)
down_protein_list.append(down_protein);
up_protein_list.append(up_protein)
if up_protein > 10 and down_protein > 10:
fold_change = up_protein / down_protein;
protein_length_fold_diff.append(fold_change)
median_fold_diff = statistics.median(protein_length_fold_diff)
try:
down_avg = int(statistics.avg(down_protein_list)); up_avg = int(statistics.avg(up_protein_list))
except Exception:
down_avg = 0; up_avg = 0
try:
try:
down_std = int(statistics.stdev(down_protein_list));
up_std = int(statistics.stdev(up_protein_list))
        except ValueError: ###If 'null' is returned from stdev
down_std = 0;
up_std = 0
except Exception:
down_std = 0;
up_std = 0
if len(down_protein_list) > 1 and len(up_protein_list) > 1:
try:
#t,df,tails = statistics.ttest(down_protein_list,up_protein_list,2,3)
#t = abs(t);df = round(df)
#print 'ttest t:',t,'df:',df
#p = str(statistics.t_probability(t,df))
p = str(statistics.runComparisonStatistic(down_protein_list, up_protein_list, probability_statistic))
#print dataset_name,p
except Exception:
p = 'NA'
        if p == '1' or p == '1.0': p = 'NA'  ### p was converted to a string above
else:
p = 'NA'
###Calculate unique reciprocal isoforms for exon-inclusion, exclusion and mutual-exclusive events
unique_exon_inclusion_count = 0;
unique_exon_exclusion_count = 0;
unique_mutual_exclusive_count = 0;
unique_exon_event_db = eliminate_redundant_dict_values(unique_exon_event_db)
for affygene in unique_exon_event_db:
isoform_entries = unique_exon_event_db[affygene]
possibly_redundant = [];
non_redundant = [];
check_for_redundant = []
for entry in isoform_entries:
if entry[0] == 1: ### If there is only one regulated exon
possibly_redundant.append(entry)
else:
non_redundant.append(entry)
critical_exon_list = entry[1]
for exon in critical_exon_list:
check_for_redundant.append(exon)
for entry in possibly_redundant:
exon = entry[1][0]
if exon not in check_for_redundant:
non_redundant.append(entry)
for entry in non_redundant:
if entry[2] == 'ei-ex':
if entry[3] == 'upregulated':
unique_exon_inclusion_count += 1
else:
unique_exon_exclusion_count += 1
else:
unique_mutual_exclusive_count += 1
udI = unique_exon_inclusion_count;
ddI = unique_exon_exclusion_count;
mx = unique_mutual_exclusive_count
###Add splice event information to the functional_annotation_db
for splice_event in splice_event_db: count = splice_event_db[splice_event]; functional_annotation_db.append(
(splice_event, count))
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA': udI = 'NA'; ddI = 'NA'
summary_results_db[dataset_name[0:-1]] = udI, ddI, mx, up_splice_val_genes, down_dI_genes, (
up_splice_val_genes + down_dI_genes), upregulated_genes, downregulated_genes, diff_exp_spliced_genes, upregulated_rna_factor, downregulated_rna_factor, diff_spliced_rna_factor, down_avg, down_std, up_avg, up_std, p, median_fold_diff, functional_annotation_db
result_list = exportComparisonSummary(dataset_name, summary_data_db, 'log')
###Re-set this variable (useful for testing purposes)
clearObjectsFromMemory(gene_expression_diff_db)
clearObjectsFromMemory(splice_event_list);
clearObjectsFromMemory(si_db);
si_db = []
clearObjectsFromMemory(fdr_exon_stats)
try:
clearObjectsFromMemory(excluded_probeset_db); clearObjectsFromMemory(ex_db); ex_db = []
except Exception:
ex_db = []
clearObjectsFromMemory(exon_db)
#clearObjectsFromMemory(annotate_db)
critical_probeset_annotation_db = [];
gene_expression_diff_db = [];
domain_associated_genes = [];
permute_p_values = []
permute_miR_inputs = [];
seq_attribute_str = [];
microRNA_count_db = [];
excluded_probeset_db = [];
fdr_exon_stats = []
splice_event_list = [];
critical_exon_db_len = len(
critical_exon_db)#; critical_exon_db=[] deleting here will cause a global instance problem
all_domain_gene_hits = [];
gene_splice_event_score = [];
unique_exon_event_db = [];
probeset_aligning_db = [];
ranked_uniprot_list_all = [];
filtered_microRNA_exon_db = [];
permute_domain_inputs = [];
functional_annotation_db2 = [];
functional_attribute_db2 = [];
protein_length_list = [];
ranked_uniprot_list_coding_only = [];
miR_str = [];
permute_input_list = [];
microRNA_exon_feature_db2 = [];
alternatively_reg_tc = [];
direct_domain_gene_alignments = [];
aspire_gene_results = [];
domain_gene_counts = [];
functional_annotation = [];
protein_exon_feature_db2 = [];
microRNA_attribute_db = [];
probeset_mirBS_db = [];
exon_hits = [];
critical_gene_exons = [];
gene_exon_region = [];
exon_db = [];
external_exon_annot = [];
values = [];
down_protein_list = [];
functional_annotation_db = [];
protein_length_fold_diff = [];
comparison_count = [];
filtered_arrayids = [];
domain_hit_gene_count_db = [];
up_protein_list = [];
probeset_domain_db = []
try:
goelite_data.close()
except Exception:
null = []
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
return summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, critical_exon_db_len
def deviation(dI, avg_dI, stdev_dI):
dI = covertLogFoldToNonLogFloat(dI)
avg_dI = covertLogFoldToNonLogFloat(avg_dI)
stdev_dI = covertLogFoldToNonLogFloat(stdev_dI)
return str(abs((dI - avg_dI) / stdev_dI))
def covertLogExpressionToNonLog(log_val):
if normalization_method == 'RPKM':
nonlog_val = (math.pow(2, float(log_val)))
else:
nonlog_val = (math.pow(2, float(log_val))) - 1
return str(nonlog_val)
def covertLogFoldToNonLog(log_val):
try:
if float(log_val) < 0:
nonlog_val = (-1 / math.pow(2, (float(log_val))))
else:
nonlog_val = (math.pow(2, float(log_val)))
except Exception:
nonlog_val = log_val
return str(nonlog_val)
def covertLogFoldToNonLogFloat(log_val):
if float(log_val) < 0:
nonlog_val = (-1 / math.pow(2, (float(log_val))))
else:
nonlog_val = (math.pow(2, float(log_val)))
return nonlog_val
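### Illustrative sketch (hypothetical helper, not called by the pipeline): demonstrates the signed
### log2-to-linear fold convention implemented by covertLogFoldToNonLog/covertLogFoldToNonLogFloat above,
### where a negative log2 fold x maps to -1/(2**x) so that a log2 fold of -1 becomes a linear fold of -2.
def _exampleSignedFoldConversion():
    log_folds = [1.0, -1.0, 2.0, -2.0, 0.0]
    linear_folds = [covertLogFoldToNonLogFloat(x) for x in log_folds]
    ### Expected output: [2.0, -2.0, 4.0, -4.0, 1.0]
    return linear_folds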
def checkForTransSplicing(uid, splicing_event):
pl = string.split(uid, ':')
if len(pl) > 2:
if pl[0] not in pl[1]: ### Two different genes
if len(splicing_event) > 0:
splicing_event += '|trans-splicing'
else:
                splicing_event = 'trans-splicing'
return splicing_event
def fs(text):
### Formats a text entry to prevent delimiting a comma
return '"' + text + '"'
def analyzeSplicingIndex(fold_dbase):
"""The Splicing Index (SI) represents the log ratio of the exon intensities between the two tissues after normalization
to the gene intensities in each sample: SIi = log2((e1i/g1j)/(e2i/g2j)), for the i-th exon of the j-th gene in tissue
type 1 or 2. The splicing indices are then subjected to a t-test to probe for differential inclusion of the exon into the gene.
In order to determine if the change in isoform expression was statistically significant, a simple two-tailed t-test was carried
out on the isoform ratios by grouping the 10 samples from either "tumor" or "normal" tissue.
The method ultimately producing the highest proportion of true positives was to retain only: a) exons with a DABG p-value < 0.05,
b) genes with a signal > 70, c) exons with a log ratio between tissues (i.e., the gene-level normalized fold change) > 0.5,
d) Splicing Index p-values < 0.005 and e) Core exons.
Gardina PJ, Clark TA, Shimada B, Staples MK, Yang Q, Veitch J, Schweitzer A, Awad T, Sugnet C, Dee S, Davies C, Williams A, Turpaz Y.
Alternative splicing and differential gene expression in colon cancer detected by a whole genome exon array.
BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196
"""
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene\tExonID\tprobesets'] + original_array_names, '\t') + '\n';
data.write(title)
print 'Calculating splicing-index values (please be patient)...',
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
    print len(fold_dbase), id_name, 'being examined'
###original_avg_const_exp_db contains constitutive mean expression values per group: G6953871 [7.71, 7.66]
###array_raw_group_values: Raw expression values in list of groups: G7072464@J935416_RC@j_at ([1.79, 2.16, 2.22], [1.68, 2.24, 1.97, 1.92, 2.12])
###avg_const_exp_db contains the raw constitutive expression values in a single list
splicing_index_hash = [];
excluded_probeset_db = {};
denominator_probesets = 0;
interaction = 0
original_increment = int(len(exon_db) / 20);
increment = original_increment
for probeset in exon_db:
ed = exon_db[probeset]
#include_probeset = ed.IncludeProbeset()
if interaction == increment: increment += original_increment; print '*',
interaction += 1
include_probeset = 'yes' ###Moved this filter to import of the probeset relationship file
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
geneid = ed.GeneID()
if probeset in fold_dbase and geneid in original_avg_const_exp_db: ###used to search for array_raw_group_values, but when filtered by expression changes, need to filter by adj_fold_dbase
denominator_probesets += 1
###Includes probesets with a calculated constitutive expression value for each gene and expression data for that probeset
group_index = 0;
si_interim_group_db = {};
si_interim_group_str_db = {};
ge_threshold_count = 0;
value_count = 0
for group_values in array_raw_group_values[probeset]:
"""gene_expression_value = math.pow(2,original_avg_const_exp_db[geneid][group_index])
                    ###Check to see if gene expression is > threshold for both conditions
if gene_expression_value>gene_expression_threshold:ge_threshold_count+=1"""
value_index = 0;
ratio_hash = [];
ratio_str_hash = []
                    for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
#exp_val = math.pow(2,value);ge_val = math.pow(2,avg_const_exp_db[geneid][value_count]) ###To calculate a ttest we need the raw constitutive expression values, these are not in group list form but are all in a single list so keep count.
exp_val = value;
ge_val = avg_const_exp_db[geneid][value_count]
exp_ratio = exp_val - ge_val;
ratio_hash.append(exp_ratio);
ratio_str_hash.append(str(exp_ratio))
value_index += 1;
value_count += 1
si_interim_group_db[group_index] = ratio_hash
si_interim_group_str_db[group_index] = ratio_str_hash
group_index += 1
group1_ratios = si_interim_group_db[0];
group2_ratios = si_interim_group_db[1]
group1_mean_ratio = statistics.avg(group1_ratios);
group2_mean_ratio = statistics.avg(group2_ratios)
if export_NI_values == 'yes':
try:
er = ed.ExonID()
except Exception:
er = 'NA'
ev = string.join(
[geneid + '\t' + er + '\t' + probeset] + si_interim_group_str_db[0] + si_interim_group_str_db[
1], '\t') + '\n';
data.write(ev)
#if ((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2)))<0: opposite_SI_log_mean = 'yes'
if (group1_mean_ratio * group2_mean_ratio) < 0:
opposite_SI_log_mean = 'yes'
else:
opposite_SI_log_mean = 'no'
try:
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios, group2_ratios,
probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
                        normIntensityP = 'NA' ### Set to an always significant value
if normIntensityP == 1: normIntensityP = 'NA'
splicing_index = group1_mean_ratio - group2_mean_ratio;
abs_splicing_index = abs(splicing_index)
#if probeset == '3061323': print abs_splicing_index,normIntensityP,ed.ExonID(),group1_mean_ratio,group2_mean_ratio,math.log(group1_mean_ratio,2),math.log(group2_mean_ratio,2),((math.log(group1_mean_ratio,2))*(math.log(group2_mean_ratio,2))),opposite_SI_log_mean; kill
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
#if abs_splicing_index>1 and normIntensityP < 0.05: print probeset,normIntensityP, abs_splicing_index;kill
else:
midas_p = 0
#print ed.GeneID(),ed.ExonID(),probeset,splicing_index,normIntensityP,midas_p,group1_ratios,group2_ratios
if abs_splicing_index > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or normIntensityP == 'NA' or normIntensityP == 1) and midas_p < p_threshold:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
constit_exp1 = original_avg_const_exp_db[geneid][0]
constit_exp2 = original_avg_const_exp_db[geneid][1]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two Splicing-Index groups to report have been determined
data_list1 = array_raw_group_values[probeset][0];
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2,
probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1);
normInt2 = (experimental_exp - constit_exp2);
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p,
'')
sid = ExonData(splicing_index, probeset, critical_exon_list, geneid, group1_ratios,
group2_ratios, normIntensityP, opposite_SI_log_mean)
sid.setConstitutiveExpression(constit_exp1);
sid.setConstitutiveFold(ge_fold);
sid.setProbesetExpressionData(ped)
splicing_index_hash.append((splicing_index, sid))
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(splicing_index, geneid, normIntensityP)
excluded_probeset_db[probeset] = eed
except Exception:
null = [] ###If this occurs, then most likely, the exon and constitutive probeset are the same
print 'Splicing Index analysis complete'
if export_NI_values == 'yes': data.close()
splicing_index_hash.sort();
splicing_index_hash.reverse()
print len(splicing_index_hash), id_name, "with evidence of Alternative expression"
p_value_call = '';
permute_p_values = {};
summary_data_db['denominator_exp_events'] = denominator_probesets
return splicing_index_hash, p_value_call, permute_p_values, excluded_probeset_db
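### Illustrative sketch (hypothetical toy values, not used by analyzeSplicingIndex): mirrors the
### splicing-index calculation above. Since expression values are already log2-transformed, the
### SI = log2((e1/g1)/(e2/g2)) from the docstring reduces to a difference of mean gene-normalized exon ratios.
def _exampleSplicingIndex():
    group1_exon = [8.0, 8.2, 7.9]; group1_gene = [9.0, 9.1, 8.9]  ### log2 exon and gene-level estimates
    group2_exon = [7.0, 7.1, 6.9]; group2_gene = [9.0, 9.0, 9.1]
    group1_ratios = [e - g for e, g in zip(group1_exon, group1_gene)]
    group2_ratios = [e - g for e, g in zip(group2_exon, group2_gene)]
    group1_mean_ratio = sum(group1_ratios) / len(group1_ratios)
    group2_mean_ratio = sum(group2_ratios) / len(group2_ratios)
    return group1_mean_ratio - group2_mean_ratio  ### the splicing-index (dI) for this toy exon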
def importResiduals(filename, probe_probeset_db):
fn = filepath(filename);
key_db = {};
x = 0;
prior_uid = '';
uid_gene_db = {}
for line in open(fn, 'rU').xreadlines():
if x == 0 and line[0] == '#':
null = []
elif x == 0:
x += 1
else:
data = cleanUpLine(line)
t = string.split(data, '\t')
uid = t[0];
uid, probe = string.split(uid, '-')
try:
probeset = probe_probeset_db[probe];
residuals = t[1:]
if uid == prior_uid:
try:
uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
else: ### Hence, we have finished storing all residual data for that gene
if len(uid_gene_db) > 0: calculateFIRMAScores(uid_gene_db); uid_gene_db = {}
try:
uid_gene_db[probeset].append(residuals) ### Don't need to keep track of the probe ID
except KeyError:
uid_gene_db[probeset] = [residuals]
prior_uid = uid
except Exception:
null = []
### For the last gene imported
if len(uid_gene_db) > 0: calculateFIRMAScores(uid_gene_db)
def calculateFIRMAScores(uid_gene_db):
probeset_residuals = {};
all_gene_residuals = [];
total_probes = 0
for probeset in uid_gene_db:
residuals_list = uid_gene_db[probeset];
sample_db = {};
total_probes += len(residuals_list)
### For all probes in a probeset, calculate the median residual for each sample
for residuals in residuals_list:
index = 0
for residual in residuals:
try:
sample_db[index].append(float(residual))
except KeyError:
sample_db[index] = [float(residual)]
all_gene_residuals.append(float(residual))
index += 1
for index in sample_db:
median_residual = statistics.median(sample_db[index])
sample_db[index] = median_residual
probeset_residuals[probeset] = sample_db
### Calculate the Median absolute deviation
"""http://en.wikipedia.org/wiki/Absolute_deviation
The median absolute deviation (also MAD) is the median absolute deviation from the median. It is a robust estimator of dispersion.
For the example {2, 2, 3, 4, 14}: 3 is the median, so the absolute deviations from the median are {1, 1, 0, 1, 11} (or reordered as
{0, 1, 1, 1, 11}) with a median absolute deviation of 1, in this case unaffected by the value of the outlier 14.
Here, the global gene median will be expressed as res_gene_median.
"""
res_gene_median = statistics.median(all_gene_residuals);
subtracted_residuals = []
for residual in all_gene_residuals: subtracted_residuals.append(abs(res_gene_median - residual))
gene_MAD = statistics.median(subtracted_residuals)
#if '3263614' in probeset_residuals: print len(all_gene_residuals),all_gene_residuals
for probeset in probeset_residuals:
sample_db = probeset_residuals[probeset]
for index in sample_db:
median_residual = sample_db[index]
try:
firma_score = median_residual / gene_MAD
sample_db[index] = firma_score
except Exception:
null = []
#if probeset == '3263614': print index, median_residual, firma_score, gene_MAD
firma_scores[probeset] = sample_db
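### Illustrative sketch of the median absolute deviation (MAD) used by calculateFIRMAScores above,
### following the {2, 2, 3, 4, 14} example quoted in its docstring. The local _median helper is a
### hypothetical stand-in for statistics.median so the sketch stays self-contained.
def _exampleMedianAbsoluteDeviation():
    def _median(values):
        values = sorted(values); n = len(values)
        if n % 2 == 1: return float(values[n / 2])
        return (values[n / 2 - 1] + values[n / 2]) / 2.0
    residuals = [2, 2, 3, 4, 14]
    res_median = _median(residuals)                         ### 3
    deviations = [abs(res_median - r) for r in residuals]   ### [1, 1, 0, 1, 11]
    return _median(deviations)                              ### MAD = 1, unaffected by the outlier 14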
def importProbeToProbesets(fold_dbase):
#print "Importing probe-to-probeset annotations (please be patient)..."
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_probeset-probes.txt'
probeset_to_include = {}
gene2examine = {}
    ### Although we want to restrict the analysis to probesets in fold_dbase, we don't want to affect the FIRMA model - filter later
for probeset in fold_dbase:
try:
ed = exon_db[probeset]; gene2examine[ed.GeneID()] = []
except Exception:
null = []
for gene in original_avg_const_exp_db: gene2examine[gene] = []
for probeset in exon_db:
ed = exon_db[probeset];
geneid = ed.GeneID()
if geneid in gene2examine:
gene2examine[geneid].append(probeset) ### Store these so we can break things up
probeset_to_include[probeset] = []
probeset_probe_db = importGenericFilteredDBList(filename, probeset_to_include)
    ### Get Residuals filename and verify its presence
#print "Importing comparison residuals..."
filename_objects = string.split(dataset_name[:-1], '.p');
filename = filename_objects[0] + '.txt'
if len(array_group_list) == 2:
filename = import_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/' + filename
else:
filename = import_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/' + filename
status = verifyFile(filename)
if status != 'found':
        print_out = 'The residual file: ';
        print_out += filename
        print_out += ' was not found in the default location.\nPlease re-run the analysis from the beginning.'
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc();
badExit()
print "Calculating FIRMA scores..."
input_count = len(gene2examine) ### Number of probesets or probeset pairs (junction array) alternatively regulated
original_increment = int(input_count / 20);
increment = original_increment
start_time = time.time();
x = 0
probe_probeset_db = {};
gene_count = 0;
total_gene_count = 0;
max_gene_count = 3000;
round = 1
for gene in gene2examine:
gene_count += 1;
total_gene_count += 1;
x += 1
#if x == increment: increment+=original_increment; print '*',
for probeset in gene2examine[gene]:
for probe in probeset_probe_db[probeset]: probe_probeset_db[probe] = probeset
if gene_count == max_gene_count:
### Import residuals and calculate primary sample/probeset FIRMA scores
importResiduals(filename, probe_probeset_db)
#print max_gene_count*round,"genes"
print '*',
gene_count = 0;
probe_probeset_db = {};
round += 1 ### Reset these variables and re-run
probeset_probe_db = {}
### Analyze residuals for the remaining probesets (< max_gene_count)
importResiduals(filename, probe_probeset_db)
end_time = time.time();
time_diff = int(end_time - start_time)
print "FIRMA scores calculted for", total_gene_count, "genes in %d seconds" % time_diff
def FIRMAanalysis(fold_dbase):
"""The FIRMA method calculates a score for each probeset and for each samples within a group of arrays, independent
of group membership. However, in AltAnalyze, these analyses are performed dependent on group. The FIRMA score is calculated
by obtaining residual values (residuals is a variable for each probe that can't be explained by the GC content or intensity
of that probe) from APT, for all probes corresponding to a metaprobeset (Ensembl gene in AltAnalyze). These probe residuals
are imported and the ratio of the median residual per probeset per sample divided by the absolute standard deviation of the
median of all probes for all samples for that gene."""
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in fold_dbase: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del fold_dbase[probeset]
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing annotation)
if filter_for_AS == 'yes':
proceed = 0
for probeset in exon_db:
as_call = exon_db[probeset].SplicingCall()
if as_call == 0:
try:
del fold_dbase[probeset]
except KeyError:
null = []
#print 'Beginning FIRMA analysis (please be patient)...'
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
        sample_names_ordered = [] ### note: Can't use original_array_names since the order is potentially different (FIRMA stores sample data as indices within dictionary keys)
for group_name in array_group_list: ### THIS LIST IS USED TO MAINTAIN CONSISTENT GROUP ORDERING DURING ANALYSIS
for sample_name in array_group_name_db[group_name]: sample_names_ordered.append(sample_name)
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
data = export.ExportFile(summary_output)
title = string.join(['gene-probesets'] + sample_names_ordered, '\t') + '\n';
data.write(title)
### Import probes for probesets to be analyzed
global firma_scores;
firma_scores = {}
importProbeToProbesets(fold_dbase)
    print 'FIRMA scores obtained for', len(firma_scores), 'probesets.'
### Group sample scores for each probeset and calculate statistics
firma_hash = [];
excluded_probeset_db = {};
denominator_probesets = 0;
interaction = 0
original_increment = int(len(firma_scores) / 20);
increment = original_increment
for probeset in firma_scores:
if probeset in fold_dbase: ### Filter based on expression
ed = exon_db[probeset];
geneid = ed.GeneID()
if interaction == increment: increment += original_increment; print '*',
interaction += 1;
denominator_probesets += 1
sample_db = firma_scores[probeset]
###Use the index values from performExpressionAnalysis to assign each expression value to a new database
firma_group_array = {}
for group_name in array_group_db:
for array_index in array_group_db[group_name]:
firma_score = sample_db[array_index]
try:
firma_group_array[group_name].append(firma_score)
except KeyError:
firma_group_array[group_name] = [firma_score]
###array_group_list should already be unique and correctly sorted (see above)
firma_lists = [];
index = 0
for group_name in array_group_list:
firma_list = firma_group_array[group_name]
if len(array_group_list) > 2: firma_list = statistics.avg(firma_list), firma_list, index
firma_lists.append(firma_list);
index += 1
if export_NI_values == 'yes': ### DO THIS HERE SINCE firma_lists IS SORTED BELOW!!!!
try:
er = ed.ExonID()
except Exception:
er = 'NA'
export_list = [geneid + '\t' + er + '\t' + probeset];
export_list2 = []
for firma_ls in firma_lists:
if len(array_group_list) > 2: firma_ls = firma_ls[
                        1] ### See above modification of the firma_list object for multiple-group analysis
export_list += firma_ls
for i in export_list: export_list2.append(str(i))
ev = string.join(export_list2, '\t') + '\n';
data.write(ev)
if len(array_group_list) == 2:
firma_list1 = firma_lists[0];
firma_list2 = firma_lists[-1];
firma_avg1 = statistics.avg(firma_list1);
firma_avg2 = statistics.avg(firma_list2)
index1 = 0;
                index2 = 1 ### Only two groups, thus only two indices
            else: ### The code below identifies the comparison that yields the greatest FIRMA difference
firma_lists.sort();
index1 = firma_lists[0][-1];
index2 = firma_lists[-1][-1]
firma_list1 = firma_lists[0][1];
firma_list2 = firma_lists[-1][1];
firma_avg1 = firma_lists[0][0];
firma_avg2 = firma_lists[-1][0]
if calculate_normIntensity_p == 'yes':
try:
normIntensityP = statistics.runComparisonStatistic(firma_list1, firma_list2, probability_statistic)
except Exception:
normIntensityP = 'NA' ### Occurs when analyzing two groups with no variance
else:
normIntensityP = 'NA'
if normIntensityP == 1: normIntensityP = 'NA'
firma_fold_change = firma_avg2 - firma_avg1
firma_fold_change = -1 * firma_fold_change ### Make this equivalent to Splicing Index fold which is also relative to experimental not control
if (firma_avg2 * firma_avg1) < 0:
opposite_FIRMA_scores = 'yes'
else:
opposite_FIRMA_scores = 'no'
if probeset in midas_db:
try:
midas_p = float(midas_db[probeset])
except ValueError:
midas_p = 0
else:
midas_p = 0
#if probeset == '3263614': print firma_fold_change, normIntensityP, midas_p,'\n',firma_list1, firma_list2, [p_threshold];kill
if abs(firma_fold_change) > alt_exon_logfold_cutoff and (
normIntensityP < p_threshold or normIntensityP == 'NA') and midas_p < p_threshold:
exonid = ed.ExonID();
critical_exon_list = [1, [exonid]]
#gene_expression_values = original_avg_const_exp_db[geneid]
constit_exp1 = original_avg_const_exp_db[geneid][index1]
constit_exp2 = original_avg_const_exp_db[geneid][index2]
ge_fold = constit_exp2 - constit_exp1
### Re-define all of the pairwise values now that the two FIRMA groups to report have been determined
data_list1 = array_raw_group_values[probeset][index1];
data_list2 = array_raw_group_values[probeset][index2]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
group_name1 = array_group_list[index1];
group_name2 = array_group_list[index2]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 1
normInt1 = (baseline_exp - constit_exp1);
normInt2 = (experimental_exp - constit_exp2);
adj_fold = normInt2 - normInt1
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p,
group_name2 + '_vs_' + group_name1)
fid = ExonData(firma_fold_change, probeset, critical_exon_list, geneid, data_list1, data_list2,
normIntensityP, opposite_FIRMA_scores)
fid.setConstitutiveExpression(constit_exp1);
fid.setConstitutiveFold(ge_fold);
fid.setProbesetExpressionData(ped)
firma_hash.append((firma_fold_change, fid))
#print [[[probeset,firma_fold_change,normIntensityP,p_threshold]]]
else:
### Also record the data for probesets that are excluded... Used by DomainGraph
eed = ExcludedExonData(firma_fold_change, geneid, normIntensityP)
excluded_probeset_db[probeset] = eed
print 'FIRMA analysis complete'
if export_NI_values == 'yes': data.close()
firma_hash.sort();
firma_hash.reverse()
print len(firma_hash), "Probesets with evidence of Alternative expression out of", len(excluded_probeset_db) + len(
firma_hash)
p_value_call = '';
permute_p_values = {};
summary_data_db['denominator_exp_events'] = denominator_probesets
return firma_hash, p_value_call, permute_p_values, excluded_probeset_db
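### Illustrative sketch (hypothetical toy numbers, not used by FIRMAanalysis): shows how a per-sample
### FIRMA score is formed in calculateFIRMAScores above, as the median probe residual for a probeset in
### one sample divided by the median absolute deviation (gene_MAD) of all probe residuals for that gene.
def _exampleFIRMAScore():
    probe_residuals_for_sample = [0.8, 1.1, 0.9]  ### residuals of one probeset's probes in one sample
    gene_MAD = 0.25                               ### MAD over all of the gene's probe residuals
    median_residual = sorted(probe_residuals_for_sample)[1]  ### 0.9 (median of three values)
    return median_residual / gene_MAD             ### FIRMA score = 3.6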
def getFilteredFilename(filename):
if array_type == 'junction':
filename = string.replace(filename, '.txt', '-filtered.txt')
return filename
def getExonVersionFilename(filename):
original_filename = filename
if array_type == 'junction' or array_type == 'RNASeq':
if explicit_data_type != 'null':
filename = string.replace(filename, array_type, array_type + '/' + explicit_data_type)
### Make sure the file exists, otherwise, use the original
file_status = verifyFile(filename)
#print [[filename,file_status]]
if file_status != 'found': filename = original_filename
return filename
def importProbesetAligningDomains(exon_db, report_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_aligning_db = importGenericDBList(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_indirect_domain_aligning_probesets.txt'
filename = getFilteredFilename(filename)
probeset_indirect_aligning_db = importGenericDBList(filename)
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
new_exon_db = {};
splicing_call_db = {}
for probeset_pair in exon_db:
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
ed = exon_db[probeset_pair];
geneid = ed.GeneID();
critical_exons = ed.CriticalExons()
for exon in critical_exons:
new_key = geneid + ':' + exon
try:
new_exon_db[new_key].append(probeset_pair)
except KeyError:
new_exon_db[new_key] = [probeset_pair]
try:
splicing_call_db[new_key].append(ed.SplicingCall())
except KeyError:
splicing_call_db[new_key] = [ed.SplicingCall()]
for key in new_exon_db:
probeset_pairs = new_exon_db[key];
probeset_pair = probeset_pairs[0] ### grab one of the probeset pairs
ed = exon_db[probeset_pair];
geneid = ed.GeneID()
jd = SimpleJunctionData(geneid, '', '', '',
probeset_pairs) ### use only those necessary fields for this function (probeset pairs will be called as CriticalExons)
splicing_call_db[key].sort();
splicing_call = splicing_call_db[key][-1];
jd.setSplicingCall(splicing_call) ### Bug from 1.15 to have key be new_key?
new_exon_db[key] = jd
exon_db = new_exon_db
gene_protein_ft_db = {};
domain_gene_count_db = {};
protein_functional_attribute_db = {};
probeset_aligning_db2 = {}
splicing_call_db = [];
new_exon_db = [] ### Clear memory
for probeset in exon_db:
#if probeset == '107650':
#if probeset in probeset_aligning_db: print probeset_aligning_db[probeset];kill
if probeset in probeset_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = [];
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_aligning_db[probeset]:
new_domain_list.append('(direct)' + domain)
new_domain_list2.append((domain, '+'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
#print exon_db['107650']
for probeset in exon_db:
if probeset in probeset_indirect_aligning_db:
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
gene = exon_db[probeset].GeneID()
new_domain_list = [];
new_domain_list2 = []
if report_type == 'gene' and proceed == 'yes':
for domain in probeset_indirect_aligning_db[probeset]:
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
probeset_list = exon_db[probeset].CriticalExons()
else:
probeset_list = [probeset]
for id in probeset_list:
for domain in probeset_indirect_aligning_db[probeset]:
new_domain_list.append('(indirect)' + domain)
new_domain_list2.append((domain, '-'))
new_domain_list = unique.unique(new_domain_list)
new_domain_list_str = string.join(new_domain_list, ', ')
gene_protein_ft_db[gene, id] = new_domain_list2
probeset_aligning_db2[id] = new_domain_list_str
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
gene_protein_ft_db = eliminate_redundant_dict_values(gene_protein_ft_db)
if analysis_method == 'ASPIRE' or analysis_method == 'linearregres':
clearObjectsFromMemory(exon_db);
exon_db = []
try:
clearObjectsFromMemory(new_exon_db)
except Exception:
null = []
probeset_indirect_aligning_db = [];
probeset_aligning_db = []
if report_type == 'perfect_match':
gene_protein_ft_db = [];
domain_gene_count_db = [];
protein_functional_attribute_db = []
return probeset_aligning_db2
elif report_type == 'probeset':
probeset_aligning_db2 = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_db2 = [];
protein_functional_attribute_db = [];
probeset_aligning_db2 = []
len_gene_protein_ft_db = len(gene_protein_ft_db);
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
def importProbesetProteinCompDomains(exon_db, report_type, comp_type):
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-domain-annotations-' + comp_type + '.txt'
if (
array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename = getFilteredFilename(
filename)
filename = getExonVersionFilename(filename)
probeset_aligning_db = importGeneric(filename)
filename = 'AltDatabase/' + species + '/' + array_type + '/probeset-protein-annotations-' + comp_type + '.txt'
if (
array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type != 'null': filename = getFilteredFilename(
filename)
filename = getExonVersionFilename(filename)
gene_protein_ft_db = {};
domain_gene_count_db = {}
for probeset in exon_db:
initial_proceed = 'no';
original_probeset = probeset
if probeset in probeset_aligning_db:
initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
### For junction analyses exon_db is really regulated_exon_junction_db, containing the inclusion,exclusion probeset tuple and an object as values
if '|' in probeset[0]: probeset1 = string.split(probeset[0], '|')[0]; probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id, probeset[
1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_db:
initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
proceed = 'no'
if filter_for_AS == 'yes':
as_call = exon_db[original_probeset].SplicingCall()
if as_call == 1: proceed = 'yes'
else:
proceed = 'yes'
new_domain_list = []
gene = exon_db[original_probeset].GeneID()
if report_type == 'gene' and proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
try:
domain, call = string.split(domain_data, '|')
except Exception:
values = string.split(domain_data, '|')
domain = values[0];
call = values[-1] ### occurs when a | exists in the annotations from UniProt
try:
domain_gene_count_db[domain].append(gene)
except KeyError:
domain_gene_count_db[domain] = [gene]
try:
gene_protein_ft_db[gene].append(domain)
except KeyError:
gene_protein_ft_db[gene] = [domain]
elif proceed == 'yes':
for domain_data in probeset_aligning_db[probeset]:
domain, call = string.split(domain_data, '|')
new_domain_list.append((domain, call))
#new_domain_list = string.join(new_domain_list,', ')
gene_protein_ft_db[gene, original_probeset] = new_domain_list
domain_gene_count_db = eliminate_redundant_dict_values(domain_gene_count_db)
probeset_aligning_db = [] ### Clear memory
probeset_aligning_protein_db = importGeneric(filename)
probeset_pairs = {} ### Store all possible probeset pairs as single probesets for protein-protein associations
for probeset in exon_db:
if len(probeset) == 2:
for p in probeset: probeset_pairs[p] = probeset
if report_type == 'probeset':
### Below code was re-written to be more memory efficient by not storing all data in probeset-domain-annotations-*comp*.txt via generic import
protein_functional_attribute_db = {};
probeset_protein_associations = {};
protein_db = {}
for probeset in exon_db:
initial_proceed = 'no';
original_probeset = probeset
if probeset in probeset_aligning_protein_db:
initial_proceed = 'yes'
elif array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if '|' in probeset[0]: probeset1 = string.split(probeset[0], '|')[0]; probeset = probeset1, probeset[1]
try:
alternate_probeset_id = exon_db[probeset].InclusionLookup(); probeset = alternate_probeset_id, \
probeset[1]
except Exception:
null = []
probeset_joined = string.join(probeset, '|')
#print [probeset_joined],[probeset]
if probeset_joined in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset_joined
elif probeset[0] in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset[0]
elif probeset[1] in probeset_aligning_protein_db:
initial_proceed = 'yes'; probeset = probeset[1]
#else: for i in probeset_aligning_db: print [i];kill
if initial_proceed == 'yes':
protein_data_list = probeset_aligning_protein_db[probeset]
new_protein_list = []
gene = exon_db[original_probeset].GeneID()
for protein_data in protein_data_list:
protein_info, call = string.split(protein_data, '|')
if 'AA:' in protein_info:
protein_info_r = string.replace(protein_info, ')', '*')
protein_info_r = string.replace(protein_info_r, '(', '*')
protein_info_r = string.split(protein_info_r, '*')
null_protein = protein_info_r[1];
hit_protein = protein_info_r[3]
probeset_protein_associations[original_probeset] = null_protein, hit_protein, call
protein_db[null_protein] = [];
protein_db[hit_protein] = []
new_protein_list.append((protein_info, call))
#new_protein_list = string.join(new_domain_list,', ')
protein_functional_attribute_db[gene, original_probeset] = new_protein_list
filename = 'AltDatabase/' + species + '/' + array_type + '/SEQUENCE-protein-dbase_' + comp_type + '.txt'
filename = getExonVersionFilename(filename)
protein_seq_db = importGenericFiltered(filename, protein_db)
for key in protein_functional_attribute_db:
gene, probeset = key
try:
null_protein, hit_protein, call = probeset_protein_associations[probeset]
null_seq = protein_seq_db[null_protein][0];
hit_seq = protein_seq_db[hit_protein][0]
seq_attr = 'sequence: ' + '(' + null_protein + ')' + null_seq + ' -> ' + '(' + hit_protein + ')' + hit_seq
protein_functional_attribute_db[key].append((seq_attr, call))
except KeyError:
null = []
protein_seq_db = [];
probeset_aligning_protein_db = []
return gene_protein_ft_db, domain_gene_count_db, protein_functional_attribute_db
else:
probeset_aligning_protein_db = [];
len_gene_protein_ft_db = len(gene_protein_ft_db);
gene_protein_ft_db = []
return len_gene_protein_ft_db, domain_gene_count_db
class SimpleJunctionData:
def __init__(self, geneid, probeset1, probeset2, probeset1_display, critical_exon_list):
self._geneid = geneid;
self._probeset1 = probeset1;
self._probeset2 = probeset2
self._probeset1_display = probeset1_display;
self._critical_exon_list = critical_exon_list
def GeneID(self): return self._geneid
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def InclusionDisplay(self): return self._probeset1_display
def CriticalExons(self): return self._critical_exon_list
def setSplicingCall(self, splicing_call):
#self._splicing_call = EvidenceOfAltSplicing(slicing_annot)
self._splicing_call = splicing_call
def setSymbol(self, symbol): self.symbol = symbol
def Symbol(self): return self.symbol
def SplicingCall(self): return self._splicing_call
def setInclusionLookup(self, incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def formatJunctionData(probesets, affygene, critical_exon_list):
if '|' in probesets[0]: ### Only return the first inclusion probeset (agglomerated probesets)
incl_list = string.split(probesets[0], '|')
incl_probeset = incl_list[0];
excl_probeset = probesets[1]
else:
incl_probeset = probesets[0]; excl_probeset = probesets[1]
jd = SimpleJunctionData(affygene, incl_probeset, excl_probeset, probesets[0], critical_exon_list)
key = incl_probeset, excl_probeset
return key, jd
class JunctionExpressionData:
def __init__(self, baseline_norm_exp, exper_norm_exp, pval, ped):
self.baseline_norm_exp = baseline_norm_exp;
self.exper_norm_exp = exper_norm_exp;
self.pval = pval;
self.ped = ped
def ConNI(self):
ls = []
for i in self.logConNI():
ls.append(math.pow(2, i))
return ls
def ExpNI(self):
ls = []
for i in self.logExpNI():
ls.append(math.pow(2, i))
return ls
def ConNIAvg(self):
return math.pow(2, statistics.avg(self.logConNI()))
def ExpNIAvg(self):
return math.pow(2, statistics.avg(self.logExpNI()))
def logConNI(self):
return self.baseline_norm_exp
def logExpNI(self):
return self.exper_norm_exp
def Pval(self):
return self.pval
def ProbesetExprData(self):
return self.ped
def __repr__(self):
        return str(self.ConNI()) + '|' + str(self.ExpNI())
def calculateAllASPIREScores(p1, p2):
b1o = p1.ConNIAvg();
b2o = p2.ConNIAvg()
e1o = p1.ExpNIAvg();
e2o = p2.ExpNIAvg();
original_score = statistics.aspire_stringent(b1o, e1o, b2o, e2o)
index = 0;
baseline_scores = [] ### Loop through each control ratio and compare to control ratio mean
for b1 in p1.ConNI():
b2 = p2.ConNI()[index]
score = statistics.aspire_stringent(b2, e2o, b1, e1o);
index += 1
baseline_scores.append(score)
index = 0;
exp_scores = [] ### Loop through each experimental ratio and compare to control ratio mean
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1o, e1, b2o, e2);
index += 1
exp_scores.append(score)
try:
aspireP = statistics.runComparisonStatistic(baseline_scores, exp_scores, probability_statistic)
except Exception:
aspireP = 'NA' ### Occurs when analyzing two groups with no variance
if aspireP == 1: aspireP = 'NA'
"""
if aspireP<0.05 and oscore>0.2 and statistics.avg(exp_scores)<0:
index=0
for e1 in p1.ExpNI():
e2 = p2.ExpNI()[index]
score = statistics.aspire_stringent(b1,e1,b2,e2)
print p1.ExpNI(), p2.ExpNI(); print e1, e2
print e1o,e2o; print b1, b2; print score, original_score
print exp_scores, statistics.avg(exp_scores); kill"""
return baseline_scores, exp_scores, aspireP
def stringListConvert(ls):
ls2 = []
for i in ls: ls2.append(str(i))
return ls2
def analyzeJunctionSplicing(nonlog_NI_db):
group_sizes = [];
original_array_indices = permute_lists[
0] ###p[0] is the original organization of the group samples prior to permutation
for group in original_array_indices: group_sizes.append(len(group))
    ### Used to restrict the analysis to a pre-selected set of probesets (e.g. those that have a specific splicing pattern)
if len(filtered_probeset_db) > 0:
temp_db = {}
for probeset in nonlog_NI_db: temp_db[probeset] = []
for probeset in temp_db:
try:
filtered_probeset_db[probeset]
except KeyError:
del nonlog_NI_db[probeset]
    ### Used to export the relative individual adjusted probeset fold changes used for splicing-index values
if export_NI_values == 'yes':
global NIdata_export
summary_output = root_dir + 'AltResults/RawSpliceData/' + species + '/' + analysis_method + '/' + dataset_name[
:-1] + '.txt'
NIdata_export = export.ExportFile(summary_output)
title = string.join(['inclusion-probeset', 'exclusion-probeset'] + original_array_names, '\t') + '\n';
NIdata_export.write(title)
### Calculate a probeset p-value adjusted for constitutive expression levels (taken from splicing index method)
xl = 0
probeset_normIntensity_db = {}
for probeset in array_raw_group_values:
ed = exon_db[probeset];
geneid = ed.GeneID();
xl += 1
#if geneid in alt_junction_db and geneid in original_avg_const_exp_db: ### Don't want this filter since it causes problems for Trans-splicing
group_index = 0;
si_interim_group_db = {};
ge_threshold_count = 0;
value_count = 0
        ### Prepare normalized expression lists for reciprocal-junction algorithms
if geneid in avg_const_exp_db:
for group_values in array_raw_group_values[probeset]:
value_index = 0;
ratio_hash = []
                for value in group_values: ###Calculate normalized ratios for each condition and save raw values for later permutation
exp_val = value;
ge_val = avg_const_exp_db[geneid][value_count];
exp_ratio = exp_val - ge_val
ratio_hash.append(exp_ratio);
value_index += 1;
value_count += 1
si_interim_group_db[group_index] = ratio_hash
group_index += 1
group1_ratios = si_interim_group_db[0];
group2_ratios = si_interim_group_db[1]
### Calculate and store simple expression summary stats
data_list1 = array_raw_group_values[probeset][0];
data_list2 = array_raw_group_values[probeset][1]
baseline_exp = statistics.avg(data_list1);
experimental_exp = statistics.avg(data_list2);
fold_change = experimental_exp - baseline_exp
#group_name1 = array_group_list[0]; group_name2 = array_group_list[1]
try:
ttest_exp_p = statistics.runComparisonStatistic(data_list1, data_list2, probability_statistic)
except Exception:
ttest_exp_p = 'NA'
if ttest_exp_p == 1: ttest_exp_p = 'NA'
adj_fold = statistics.avg(group2_ratios) - statistics.avg(group1_ratios)
ped = ProbesetExpressionData(baseline_exp, experimental_exp, fold_change, adj_fold, ttest_exp_p, '')
try:
try:
normIntensityP = statistics.runComparisonStatistic(group1_ratios, group2_ratios,
probability_statistic)
except Exception:
#print group1_ratios,group2_ratios,array_raw_group_values[probeset],avg_const_exp_db[geneid];kill
normIntensityP = 'NA' ###occurs for constitutive probesets
except Exception:
normIntensityP = 0
if normIntensityP == 1: normIntensityP = 'NA'
ji = JunctionExpressionData(group1_ratios, group2_ratios, normIntensityP, ped)
probeset_normIntensity_db[probeset] = ji ### store and access this below
#if probeset == 'G6899622@J916374@j_at': print normIntensityP,group1_ratios,group2_ratios;kill
###Concatenate the two raw expression groups into a single list for permutation analysis
ls_concatenated = []
for group in array_raw_group_values[probeset]:
for entry in group: ls_concatenated.append(entry)
if analysis_method == 'linearregres': ###Convert out of log space
ls_concatenated = statistics.log_fold_conversion_fraction(ls_concatenated)
array_raw_group_values[probeset] = ls_concatenated
s = 0;
t = 0;
y = '';
denominator_events = 0;
excluded_probeset_db = {}
splice_event_list = [];
splice_event_list_mx = [];
splice_event_list_non_mx = [];
event_mx_temp = [];
permute_p_values = {} #use this to exclude duplicate mx events
for affygene in alt_junction_db:
if affygene in original_avg_const_exp_db:
constit_exp1 = original_avg_const_exp_db[affygene][0]
constit_exp2 = original_avg_const_exp_db[affygene][1]
ge_fold = constit_exp2 - constit_exp1
for event in alt_junction_db[affygene]:
if array_type == 'AltMouse':
#event = [('ei', 'E16-E17'), ('ex', 'E16-E18')]
#critical_exon_db[affygene,tuple(critical_exons)] = [1,'E'+str(e1a),'E'+str(e2b)] --- affygene,tuple(event) == key, 1 indicates both are either up or down together
event_call = event[0][0] + '-' + event[1][0]
exon_set1 = event[0][1];
exon_set2 = event[1][1]
probeset1 = exon_dbase[affygene, exon_set1]
probeset2 = exon_dbase[affygene, exon_set2]
critical_exon_list = critical_exon_db[affygene, tuple(event)]
if array_type == 'junction' or array_type == 'RNASeq':
event_call = 'ei-ex' ### Below objects from JunctionArrayEnsemblRules - class JunctionInformation
probeset1 = event.InclusionProbeset();
probeset2 = event.ExclusionProbeset()
exon_set1 = event.InclusionJunction();
exon_set2 = event.ExclusionJunction()
try:
novel_event = event.NovelEvent()
except Exception:
novel_event = 'known'
critical_exon_list = [1, event.CriticalExonSets()]
key, jd = formatJunctionData([probeset1, probeset2], affygene, critical_exon_list[1])
if array_type == 'junction' or array_type == 'RNASeq':
try:
jd.setSymbol(annotate_db[affygene].Symbol())
except Exception:
null = []
#if '|' in probeset1: print probeset1, key,jd.InclusionDisplay();kill
probeset_comp_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
#print probeset1,probeset2, critical_exon_list,event_call,exon_set1,exon_set2;kill
if probeset1 in nonlog_NI_db and probeset2 in nonlog_NI_db:
denominator_events += 1
try:
p1 = probeset_normIntensity_db[probeset1]; p2 = probeset_normIntensity_db[probeset2]
except Exception:
print probeset1, probeset2
p1 = probeset_normIntensity_db[probeset1]
p2 = probeset_normIntensity_db[probeset2]
#if '|' in probeset1: print
pp1 = p1.Pval();
pp2 = p2.Pval()
baseline_ratio1 = p1.ConNIAvg()
experimental_ratio1 = p1.ExpNIAvg()
baseline_ratio2 = p2.ConNIAvg()
experimental_ratio2 = p2.ExpNIAvg()
ped1 = p1.ProbesetExprData()
ped2 = p2.ProbesetExprData()
Rin = '';
Rex = ''
                    r = 0 ###Variable used to determine if we should take the absolute value of dI for mutually exclusive events
if event_call == 'ei-ex': #means probeset1 is an exon inclusion and probeset2 is an exon exclusion
                        Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
                        Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (experimental_ratio1 + experimental_ratio2)
###When Rex is larger, the exp_ratio for exclusion is decreased in comparison to baseline.
###Thus, increased inclusion (when Rin is small, inclusion is big)
if (Rin > 1 and Rex < 1):
y = 'downregulated'
elif (Rin < 1 and Rex > 1):
y = 'upregulated'
elif (Rex < Rin):
y = 'downregulated'
else:
y = 'upregulated'
temp_list = []
if event_call == 'mx-mx':
temp_list.append(exon_set1);
temp_list.append(exon_set2);
temp_list.sort()
if (affygene,
temp_list) not in event_mx_temp: #use this logic to prevent mx entries being added more than once
event_mx_temp.append((affygene, temp_list))
                            ###Arbitrarily choose which exon-set will be Rin or Rex; it doesn't matter for mutually exclusive events since the absolute value of dI is used
                            Rin = baseline_ratio1 / experimental_ratio1 # Rin=A/C
                            Rex = baseline_ratio2 / experimental_ratio2 # Rex=B/D
I1 = baseline_ratio1 / (baseline_ratio1 + baseline_ratio2)
I2 = experimental_ratio1 / (experimental_ratio1 + experimental_ratio2)
y = 'mutually-exclusive';
r = 1
if analysis_method == 'ASPIRE' and Rex != '':
#if affygene == 'ENSMUSG00000000126': print Rin, Rex, probeset1, probeset2
if (Rin > 1 and Rex < 1) or (Rin < 1 and Rex > 1):
s += 1
in1 = ((Rex - 1.0) * Rin) / (Rex - Rin);
in2 = (Rex - 1.0) / (Rex - Rin)
                            dI = ((in2 - in1) + (I2 - I1)) / 2.0 #modified to give proper exon inclusion
dI = dI * (-1) ### Reverse the fold to make equivalent to splicing-index and FIRMA scores
try:
baseline_scores, exp_scores, aspireP = calculateAllASPIREScores(p1, p2)
except Exception:
baseline_scores = [0]; exp_scores = [dI]; aspireP = 0
if export_NI_values == 'yes':
baseline_scores = stringListConvert(baseline_scores);
exp_scores = stringListConvert(exp_scores)
ev = string.join([probeset1, probeset2] + baseline_scores + exp_scores, '\t') + '\n';
NIdata_export.write(ev)
if max_replicates > 2 or equal_replicates == 2:
permute_p_values[(probeset1, probeset2)] = [aspireP, 'NA', 'NA', 'NA']
if r == 1: dI = abs(dI) ###Occurs when event is mutually exclusive
#if abs(dI)>alt_exon_logfold_cutoff: print [dI],pp1,pp2,aspireP;kill
#print [affygene,dI,pp1,pp2,aspireP,event.CriticalExonSets(),probeset1,probeset2,alt_exon_logfold_cutoff,p_threshold]
if ((pp1 < p_threshold or pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA') and abs(
dI) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(dI, probeset1, probeset2, pp1, pp2, y, event_call,
critical_exon_list, affygene, ped1, ped2)
"""if probeset1 == 'ENSMUSG00000033335:E16.1-E17.1' and probeset2 == 'ENSMUSG00000033335:E16.1-E19.1':
print [dI,pp1,pp2,p_threshold,alt_exon_logfold_cutoff]
print baseline_scores, exp_scores, [aspireP]#;sys.exit()"""
ejd.setConstitutiveExpression(constit_exp1);
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((dI, ejd))
elif aspireP < permute_p_threshold or aspireP == 'NA':
splice_event_list.append((dI, ejd))
#if abs(dI)>.2: print probeset1, probeset2, critical_exon_list, [exon_set1], [exon_set2]
#if dI>.2 and aspireP<0.05: print baseline_scores,exp_scores,aspireP, statistics.avg(exp_scores), dI
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene + ':' + event.CriticalExonSets()[
0]] = probeset1, affygene, dI, 'NA', aspireP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
if analysis_method == 'linearregres' and Rex != '':
s += 1
log_fold, linregressP, rsqrd_status = getLinearRegressionScores(probeset1, probeset2,
group_sizes)
log_fold = log_fold ### Unlike the ASPIRE dI above, no sign reversal is applied to the linear regression fold
if max_replicates > 2 or equal_replicates == 2: permute_p_values[(probeset1, probeset2)] = [
linregressP, 'NA', 'NA', 'NA']
if rsqrd_status == 'proceed':
if ((pp1 < p_threshold or pp2 < p_threshold) or pp1 == 1 or pp1 == 'NA') and abs(
log_fold) > alt_exon_logfold_cutoff: ###Require that the splice event have a constitutive corrected p less than the user defined threshold
ejd = ExonJunctionData(log_fold, probeset1, probeset2, pp1, pp2, y, event_call,
critical_exon_list, affygene, ped1, ped2)
ejd.setConstitutiveExpression(constit_exp1);
ejd.setConstitutiveFold(ge_fold)
if perform_permutation_analysis == 'yes':
splice_event_list.append((log_fold, ejd))
elif linregressP < permute_p_threshold:
splice_event_list.append((log_fold, ejd))
#if probeset1 == 'G6990053@762121_762232_at' and probeset2 == 'G6990053@J926254@j_at':
#print event_call, critical_exon_list,affygene, Rin, Rex, y, temp_list;kill
elif array_type == 'junction' or array_type == 'RNASeq':
excluded_probeset_db[affygene + ':' + event.CriticalExonSets()[
0]] = probeset1, affygene, log_fold, 'NA', linregressP
if array_type == 'RNASeq':
try:
ejd.setNovelEvent(novel_event)
except Exception:
None
else:
t += 1
clearObjectsFromMemory(probeset_normIntensity_db)
probeset_normIntensity_db = {}; ### Potentially large memory object containing summary stats for all probesets
statistics.adjustPermuteStats(permute_p_values)
summary_data_db['denominator_exp_events'] = denominator_events
print "Number of exon-events analyzed:", s
print "Number of exon-events excluded:", t
return splice_event_list, probeset_comp_db, permute_p_values, excluded_probeset_db
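### Illustrative sketch (not called anywhere above): condenses the inline ASPIRE-style dI
### calculation from the 'ei-ex' branch into a standalone function. The function name and the
### direct use of the four averaged normalized-intensity ratios are assumptions made for
### illustration only; the reported statistics come from calculateAllASPIREScores and
### statistics.aspire_stringent in the real workflow.
def sketchAspireDeltaI(baseline_ratio1, experimental_ratio1, baseline_ratio2, experimental_ratio2):
    Rin = baseline_ratio1 / experimental_ratio1  # inclusion probeset: baseline/experimental (Rin=A/C)
    Rex = baseline_ratio2 / experimental_ratio2  # exclusion probeset: baseline/experimental (Rex=B/D)
    I1 = baseline_ratio1 / (baseline_ratio1 + baseline_ratio2)  # baseline inclusion fraction
    I2 = experimental_ratio1 / (experimental_ratio1 + experimental_ratio2)  # experimental inclusion fraction
    in1 = ((Rex - 1.0) * Rin) / (Rex - Rin)  # assumes Rex != Rin (no zero denominator)
    in2 = (Rex - 1.0) / (Rex - Rin)
    dI = ((in2 - in1) + (I2 - I1)) / 2.0
    return dI * (-1)  # sign reversed to orient the score like splicing-index and FIRMA scores
### Toy example: sketchAspireDeltaI(2.0, 1.0, 0.5, 1.0) returns ~0.32.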
def maxReplicates():
replicates = 0;
greater_than_two = 0;
greater_than_one = 0;
group_sizes = []
for probeset in array_raw_group_values:
for group_values in array_raw_group_values[probeset]:
try:
replicates += len(group_values);
group_sizes.append(len(group_values))
if len(group_values) > 2:
greater_than_two += 1
elif len(group_values) > 1:
greater_than_one += 1
except Exception:
replicates += len(array_raw_group_values[probeset]); break
break
group_sizes = unique.unique(group_sizes)
if len(group_sizes) == 1:
equal_replicates = group_sizes[0]
else:
equal_replicates = 0
max_replicates = replicates / float(original_conditions)
if max_replicates < 2.01:
if greater_than_two > 0 and greater_than_one > 0: max_replicates = 3
return max_replicates, equal_replicates
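### Illustrative sketch only: the replicate bookkeeping performed by maxReplicates() for a
### single probeset, written against an explicit list of per-group sample lists instead of the
### global array_raw_group_values/original_conditions structures. The special-case bump to
### max_replicates = 3 for mixed group sizes in maxReplicates() is intentionally omitted here.
def sketchReplicateSummary(group_values_list):
    group_sizes = sorted(set(len(group) for group in group_values_list))
    total_replicates = sum(len(group) for group in group_values_list)
    equal_replicates = group_sizes[0] if len(group_sizes) == 1 else 0  # non-zero only when all groups match
    max_replicates = total_replicates / float(len(group_values_list))  # average replicates per condition
    return max_replicates, equal_replicates
### e.g. two groups of three samples -> (3.0, 3); groups of sizes 2 and 3 -> (2.5, 0). Downstream,
### permutation analysis is only attempted when max_replicates > 2 or equal_replicates == 2
### (see furtherProcessJunctionScores below).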
def furtherProcessJunctionScores(splice_event_list, probeset_comp_db, permute_p_values):
splice_event_list.sort();
splice_event_list.reverse()
print "filtered %s scores:" % analysis_method, len(splice_event_list)
if perform_permutation_analysis == 'yes':
###*********BEGIN PERMUTATION ANALYSIS*********
if max_replicates > 2 or equal_replicates == 2:
splice_event_list, p_value_call, permute_p_values = permuteSplicingScores(splice_event_list)
else:
print "WARNING...Not enough replicates to perform permutation analysis."
p_value_call = '';
permute_p_values = {}
else:
if max_replicates > 2 or equal_replicates == 2:
if probability_statistic == 'unpaired t-test':
p_value_call = analysis_method + '-OneWayAnova'
else:
p_value_call = analysis_method + '-' + probability_statistic
else:
if probability_statistic == 'unpaired t-test':
p_value_call = 'OneWayAnova';
permute_p_values = {}
else:
p_value_call = probability_statistic;
permute_p_values = {}
print len(splice_event_list), 'alternative events after subsequent filtering (optional)'
### Get ExonJunction annotations
junction_splicing_annot_db = getJunctionSplicingAnnotations(probeset_comp_db)
regulated_exon_junction_db = {};
new_splice_event_list = []
if filter_for_AS == 'yes': print "Filtering for evidence of Alternative Splicing"
for (fold, ejd) in splice_event_list:
proceed = 'no'
if filter_for_AS == 'yes':
try:
ja = junction_splicing_annot_db[ejd.Probeset1(), ejd.Probeset2()];
splicing_call = ja.SplicingCall()
if splicing_call == 1: proceed = 'yes'
except KeyError:
proceed = 'no'
else:
proceed = 'yes'
if proceed == 'yes':
key, jd = formatJunctionData([ejd.Probeset1(), ejd.Probeset2()], ejd.GeneID(), ejd.CriticalExons())
regulated_exon_junction_db[key] = jd ### This is used for the permutation analysis and domain/mirBS import
new_splice_event_list.append((fold, ejd))
### Add junction probeset lookup for reciprocal junctions composed of an exonid (not in protein database currently)
if array_type == 'RNASeq' and '-' not in key[0]: ### Thus, it is an exon compared to a junction
events = alt_junction_db[ejd.GeneID()]
for ji in events:
if (ji.InclusionProbeset(), ji.ExclusionProbeset()) == key:
jd.setInclusionLookup(
ji.InclusionLookup()) ### This is the source junction from which the exon ID comes
probeset_comp_db[ji.InclusionLookup(), ji.ExclusionProbeset()] = jd
#print ji.InclusionProbeset(),ji.ExclusionProbeset(),' ',ji.InclusionLookup()
if filter_for_AS == 'yes': print len(
new_splice_event_list), "remaining after filtering for evidence of Alternative splicing"
filtered_exon_db = {}
for junctions in probeset_comp_db:
rj = probeset_comp_db[
junctions] ### Add splicing annotations to the AltMouse junction DBs (needed for permutation analysis statistics and filtering)
try:
ja = junction_splicing_annot_db[junctions]; splicing_call = ja.SplicingCall(); rj.setSplicingCall(
ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': filtered_exon_db[junctions] = rj
for junctions in regulated_exon_junction_db:
rj = regulated_exon_junction_db[junctions]
try:
ja = junction_splicing_annot_db[junctions]; rj.setSplicingCall(ja.SplicingCall())
except KeyError:
rj.setSplicingCall(0)
if filter_for_AS == 'yes': probeset_comp_db = filtered_exon_db
try:
clearObjectsFromMemory(alt_junction_db)
except Exception:
null = []
return new_splice_event_list, p_value_call, permute_p_values, probeset_comp_db, regulated_exon_junction_db
class SplicingScoreData:
def Method(self):
###e.g. ASPIRE
return self._method
def Score(self): return str(self._score)
def Probeset1(self): return self._probeset1
def Probeset2(self): return self._probeset2
def RegulationCall(self): return self._regulation_call
def GeneID(self): return self._geneid
def CriticalExons(self): return self._critical_exon_list[1]
def CriticalExonTuple(self): return self._critical_exon_list
def TTestNormalizedRatios(self): return self._normIntensityP
def TTestNormalizedRatios2(self): return self._normIntensityP2
def setConstitutiveFold(self, exp_log_ratio): self._exp_log_ratio = exp_log_ratio
def ConstitutiveFold(self): return str(self._exp_log_ratio)
def setConstitutiveExpression(self, const_baseline): self.const_baseline = const_baseline
def ConstitutiveExpression(self): return str(self.const_baseline)
def setProbesetExpressionData(self, ped): self.ped1 = ped
def ProbesetExprData1(self): return self.ped1
def ProbesetExprData2(self): return self.ped2
def setNovelEvent(self, novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def EventCall(self):
###e.g. Exon inclusion (ei) Exon exclusion (ex), ei-ex, reported in that direction
return self._event_call
def Report(self):
output = self.Method() + '|' + self.GeneID() + '|' + string.join(self.CriticalExons(), '|')
return output
def __repr__(self): return self.Report()
class ExonJunctionData(SplicingScoreData):
def __init__(self, score, probeset1, probeset2, probeset1_p, probeset2_p, regulation_call, event_call,
critical_exon_list, affygene, ped1, ped2):
self._score = score;
self._probeset1 = probeset1;
self._probeset2 = probeset2;
self._regulation_call = regulation_call
self._event_call = event_call;
self._critical_exon_list = critical_exon_list;
self._geneid = affygene
self._method = analysis_method;
self._normIntensityP = probeset1_p;
self._normIntensityP2 = probeset2_p
self.ped1 = ped1;
self.ped2 = ped2
class ExonData(SplicingScoreData):
def __init__(self, splicing_index, probeset, critical_exon_list, geneid, group1_ratios, group2_ratios,
normIntensityP, opposite_SI_log_mean):
self._score = splicing_index;
self._probeset1 = probeset;
self._opposite_SI_log_mean = opposite_SI_log_mean
self._critical_exon_list = critical_exon_list;
self._geneid = geneid
self._baseline_ratio1 = group1_ratios;
self._experimental_ratio1 = group2_ratios
self._normIntensityP = normIntensityP
self._method = analysis_method;
self._event_call = 'exon-inclusion'
if splicing_index > 0:
regulation_call = 'downregulated' ###Since baseline is the numerator ratio
else:
regulation_call = 'upregulated'
self._regulation_call = regulation_call
def OppositeSIRatios(self):
return self._opposite_SI_log_mean
class ExcludedExonData(ExonData):
def __init__(self, splicing_index, geneid, normIntensityP):
self._score = splicing_index;
self._geneid = geneid;
self._normIntensityP = normIntensityP
def getAllPossibleLinearRegressionScores(probeset1, probeset2, positions, group_sizes):
### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
all_possible_scores = [];
index1 = 0 ### Perform all possible pairwise comparisons between groups (not sure how this will work for 10+ groups)
for (pos1a, pos2a) in positions:
index2 = 0
for (pos1b, pos2b) in positions:
if pos1a != pos1b:
p1_g1 = p1_exp[pos1a:pos2a];
p1_g2 = p1_exp[pos1b:pos2b]
p2_g1 = p2_exp[pos1a:pos2a];
p2_g2 = p2_exp[pos1b:pos2b]
#log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1,probeset2,p1_g1,p2_g1,p1_g2,p2_g2,len(group_sizes)) ### Used to calculate a pairwise group pvalue
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
if log_fold < 0:
i1, i2 = index2, index1 ### all scores should indicate upregulation
else:
i1, i2 = index1, index2
all_possible_scores.append((abs(log_fold), i1, i2))
index2 += 1
index1 += 1
all_possible_scores.sort()
try:
log_fold, index1, index2 = all_possible_scores[-1]
except Exception:
log_fold = 0; index1 = 0; index2 = 0
return log_fold, index1, index2
def getLinearRegressionScores(probeset1, probeset2, group_sizes):
### Get raw expression values for the two probesets
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
try:
p1_g1 = p1_exp[:group_sizes[0]];
p1_g2 = p1_exp[group_sizes[0]:]
p2_g1 = p2_exp[:group_sizes[0]];
p2_g2 = p2_exp[group_sizes[0]:]
except Exception:
print probeset1, probeset2
print p1_exp
print p2_exp
print group_sizes
force_kill
log_fold, linregressP, rsqrd = getAllLinearRegressionScores(probeset1, probeset2, p1_g1, p2_g1, p1_g2, p2_g2, 2)
return log_fold, linregressP, rsqrd
def getAllLinearRegressionScores(probeset1, probeset2, p1_g1, p2_g1, p1_g2, p2_g2, groups):
log_fold, rsqrd = performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2)
try:
### Repeat for each sample versus baselines to calculate a p-value
index = 0;
group1_scores = []
for p1_g1_sample in p1_g1:
p2_g1_sample = p2_g1[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g1_sample], [p2_g1_sample])
group1_scores.append(log_f);
index += 1
index = 0;
group2_scores = []
for p1_g2_sample in p1_g2:
p2_g2_sample = p2_g2[index]
log_f, rs = performLinearRegression(p1_g1, p2_g1, [p1_g2_sample], [p2_g2_sample])
group2_scores.append(log_f);
index += 1
try:
linregressP = statistics.runComparisonStatistic(group1_scores, group2_scores, probability_statistic)
except Exception:
linregressP = 0;
group1_scores = [0];
group2_scores = [log_fold]
if linregressP == 1: linregressP = 0
except Exception:
linregressP = 0;
group1_scores = [0];
group2_scores = [log_fold]
if export_NI_values == 'yes' and groups == 2:
group1_scores = stringListConvert(group1_scores)
group2_scores = stringListConvert(group2_scores)
ev = string.join([probeset1, probeset2] + group1_scores + group2_scores, '\t') + '\n';
NIdata_export.write(ev)
return log_fold, linregressP, rsqrd
def performLinearRegression(p1_g1, p2_g1, p1_g2, p2_g2):
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
#print "Performing Linear Regression analysis using rlm."
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
#print "Performing Linear Regression analysis using python specific methods."
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
rsqrd = 'proceed'
#if g1_rsqrd > 0 and g2_rsqrd > 0: rsqrd = 'proceed'
#else: rsqrd = 'hault'
return log_fold, rsqrd
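### Illustrative sketch only: mirrors the structure of performLinearRegression() with
### self-contained math. The slope definition (least squares through the origin) and the log2
### conversion are assumptions for illustration; the code above delegates those steps to
### statistics.simpleLinRegress/statistics.LinearRegression and statistics.convert_to_log_fold.
def sketchSlopeRatioLogFold(p1_g1, p2_g1, p1_g2, p2_g2):
    import math
    def slope_through_origin(x_vals, y_vals):
        sxy = sum(float(x) * float(y) for x, y in zip(x_vals, y_vals))
        sxx = sum(float(x) * float(x) for x in x_vals)
        return sxy / sxx
    g1_slope = slope_through_origin(p1_g1, p2_g1)  # group 1: probeset2 expression regressed on probeset1
    g2_slope = slope_through_origin(p1_g2, p2_g2)  # group 2: same relationship
    return math.log(g2_slope / g1_slope, 2)  # log2 fold-change of the two slopes
### Usage: sketchSlopeRatioLogFold([1, 2, 3], [2, 4, 6], [1, 2, 3], [4, 8, 12]) == 1.0, i.e. the
### probeset2/probeset1 relationship is twice as steep in the second group.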
########### Permutation Analysis Functions ###########
def permuteLinearRegression(probeset1, probeset2, p):
p1_exp = array_raw_group_values[probeset1]
p2_exp = array_raw_group_values[probeset2]
p1_g1, p1_g2 = permute_samples(p1_exp, p)
p2_g1, p2_g2 = permute_samples(p2_exp, p)
return_rsqrd = 'no'
if use_R == 'yes': ###Uses the RLM algorithm
g1_slope = statistics.LinearRegression(p1_g1, p2_g1, return_rsqrd)
g2_slope = statistics.LinearRegression(p1_g2, p2_g2, return_rsqrd)
else: ###Uses a basic least squared method
g1_slope = statistics.simpleLinRegress(p1_g1, p2_g1)
g2_slope = statistics.simpleLinRegress(p1_g2, p2_g2)
log_fold = statistics.convert_to_log_fold(g2_slope / g1_slope)
return log_fold
def permuteSplicingScores(splice_event_list):
p_value_call = 'lowest_raw_p'
permute_p_values = {};
splice_event_list2 = []
if len(permute_lists) > 0:
#tuple_data in splice_event_list = dI,probeset1,probeset2,y,event_call,critical_exon_list
all_samples = [];
a = 0
for (score, x) in splice_event_list:
###NOTE: This reference dI differs slightly from the one calculated below, since the values are calculated from raw relative ratios rather than the avg
###Solution: Use the first calculated dI as the reference
score = score * (-1) ### Reverse the score to make equivalent to splicing-index and FIRMA scores
ref_splice_val = score;
probeset1 = x.Probeset1();
probeset2 = x.Probeset2();
affygene = x.GeneID()
y = 0;
p_splice_val_dist = [];
count = 0;
return_rsqrd = 'no'
for p in permute_lists: ###There are two lists in each entry
count += 1
permute = 'yes'
if analysis_method == 'ASPIRE':
p_splice_val = permute_ASPIRE_filtered(affygene, probeset1, probeset2, p, y, ref_splice_val, x)
elif analysis_method == 'linearregres':
slope_ratio = permuteLinearRegression(probeset1, probeset2, p)
p_splice_val = slope_ratio
if p_splice_val != 'null': p_splice_val_dist.append(p_splice_val)
y += 1
p_splice_val_dist.sort()
new_ref_splice_val = str(abs(ref_splice_val));
new_ref_splice_val = float(new_ref_splice_val[0:8]) #otherwise won't match up the scores correctly
if analysis_method == 'linearregres':
if ref_splice_val < 0:
p_splice_val_dist2 = []
for val in p_splice_val_dist: p_splice_val_dist2.append(-1 * val)
p_splice_val_dist = p_splice_val_dist2;
p_splice_val_dist.reverse()
p_val, pos_permute, total_permute, greater_than_true_permute = statistics.permute_p(p_splice_val_dist,
new_ref_splice_val,
len(permute_lists))
#print p_val,ref_splice_val, pos_permute, total_permute, greater_than_true_permute,p_splice_val_dist[-3:];kill
###When two groups are of equal size, there will be 2 pos_permutes rather than 1
if len(permute_lists[0][0]) == len(permute_lists[0][1]):
greater_than_true_permute = (pos_permute / 2) - 1 #sizes of the two groups are equal
else:
greater_than_true_permute = (pos_permute) - 1
if analysis_method == 'linearregres': greater_than_true_permute = (
pos_permute) - 1 ###since this is a one sided test, unlike ASPIRE
###Below equation is fine if the population is large
permute_p_values[(probeset1, probeset2)] = [p_val, pos_permute, total_permute, greater_than_true_permute]
###Remove non-significant linear regression results
if analysis_method == 'linearregres':
if p_val <= permute_p_threshold or greater_than_true_permute < 2: splice_event_list2.append(
(score, x)) ###<= since many p=0.05
print "Number of permutation p filtered splice event:", len(splice_event_list2)
if len(permute_p_values) > 0: p_value_call = 'permuted_aspire_p-value'
if analysis_method == 'linearregres': splice_event_list = splice_event_list2
return splice_event_list, p_value_call, permute_p_values
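### Illustrative sketch only: the empirical permutation p-value idea behind the call to
### statistics.permute_p above, i.e. the fraction of permuted scores at least as extreme as the
### observed score. Tie handling, sidedness and the extra greater_than_true_permute bookkeeping
### in statistics.permute_p may differ; this shows the general principle, not a re-implementation.
def sketchPermutationPValue(permuted_scores, observed_score):
    total = len(permuted_scores)
    if total == 0:
        return 1.0  # no permutations -> nothing can be called significant
    at_least_as_extreme = sum(1 for score in permuted_scores if score >= observed_score)
    return at_least_as_extreme / float(total)
### e.g. sketchPermutationPValue([0.1, 0.2, 0.9, 0.5], 0.6) == 0.25: one of the four permuted
### scores meets or exceeds the observed value.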
def permute_ASPIRE_filtered(affygene, probeset1, probeset2, p, y, ref_splice_val, x):
### Get raw expression values for each permuted group for the two probesets
b1, e1 = permute_dI(array_raw_group_values[probeset1], p)
try:
b2, e2 = permute_dI(array_raw_group_values[probeset2], p)
except IndexError:
print probeset2, array_raw_group_values[probeset2], p; kill
### Get the average constitutive expression values (averaged per-sample across probesets) for each permuted group
try:
bc, ec = permute_dI(avg_const_exp_db[affygene], p)
except IndexError:
print affygene, avg_const_exp_db[affygene], p; kill
if factor_out_expression_changes == 'no':
ec = bc
### Analyze the averaged ratio's of junction expression relative to permuted constitutive expression
try:
p_splice_val = abs(
statistics.aspire_stringent(b1 / bc, e1 / ec, b2 / bc, e2 / ec)) ### This the permuted ASPIRE score
except Exception:
p_splice_val = 0
#print p_splice_val, ref_splice_val, probeset1, probeset2, affygene; dog
if y == 0: ###The first permutation is always the real one
### Grab the absolute number with small number of decimal places
try:
new_ref_splice_val = str(p_splice_val);
new_ref_splice_val = float(new_ref_splice_val[0:8])
ref_splice_val = str(abs(ref_splice_val));
ref_splice_val = float(ref_splice_val[0:8]);
y += 1
except ValueError:
###Only get this error if your ref_splice_val is a null
print y, probeset1, probeset2;
print ref_splice_val, new_ref_splice_val, p
print b1 / bc, e1 / ec, b2 / bc, e2 / ec;
print (b1 / bc) / (e1 / ec), (b2 / bc) / (e2 / ec)
print x[7], x[8], x[9], x[10];
kill
return p_splice_val
def permute_samples(a, p):
baseline = [];
experimental = []
for p_index in p[0]:
baseline.append(a[p_index]) ###Append expression values for each permuted list
for p_index in p[1]:
experimental.append(a[p_index])
return baseline, experimental
def permute_dI(all_samples, p):
baseline, experimental = permute_samples(all_samples, p)
#if get_non_log_avg == 'no':
gb = statistics.avg(baseline);
ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
gb = statistics.log_fold_conversion_fraction(gb);
ge = statistics.log_fold_conversion_fraction(ge)
#else:
#baseline = statistics.log_fold_conversion_fraction(baseline); experimental = statistics.log_fold_conversion_fraction(experimental)
#gb = statistics.avg(baseline); ge = statistics.avg(experimental) ###Group avg baseline, group avg experimental value
return gb, ge
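### Illustrative sketch only: what one entry of permute_lists does in permute_samples() and
### permute_dI() above. A permutation is a pair of index lists; the per-sample values are
### re-split into pseudo 'baseline' and 'experimental' groups by index and then averaged. The
### conversion of those averages out of log space is left to statistics.log_fold_conversion_fraction
### in the real code and is not reproduced here.
def sketchPermutedGroupMeans(all_samples, permutation):
    baseline_indexes, experimental_indexes = permutation
    baseline = [float(all_samples[i]) for i in baseline_indexes]
    experimental = [float(all_samples[i]) for i in experimental_indexes]
    return sum(baseline) / len(baseline), sum(experimental) / len(experimental)
### e.g. sketchPermutedGroupMeans([1.0, 2.0, 3.0, 4.0], ([0, 3], [1, 2])) == (2.5, 2.5): shuffling
### group membership erases the original group difference, which is exactly what the permuted
### score distribution is meant to capture.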
def format_exon_functional_attributes(affygene, critical_probeset_list, functional_attribute_db, up_exon_list,
down_exon_list, protein_length_list):
### Add functional attributes
functional_attribute_list2 = []
new_functional_attribute_str = ''
new_seq_attribute_str = ''
new_functional_attribute_list = []
if array_type == 'exon' or array_type == 'gene' or explicit_data_type != 'null':
critical_probesets = critical_probeset_list[0]
else:
critical_probesets = tuple(critical_probeset_list)
key = affygene, critical_probesets
if key in functional_attribute_db:
###Grab exon IDs corresponding to the critical probesets
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method:
try:
critical_exons = regulated_exon_junction_db[critical_probesets].CriticalExons() ###For junction arrays
except Exception:
print key, functional_attribute_db[key];kill
else:
critical_exons = [exon_db[critical_probesets].ExonID()] ###For exon arrays
for exon in critical_exons:
for entry in functional_attribute_db[key]:
x = 0
functional_attribute = entry[0]
call = entry[1] # +, -, or ~
if ('AA:' in functional_attribute) or ('ref' in functional_attribute):
x = 1
if exon in up_exon_list:
### design logic to determine whether up or down regulation promotes the functional change (e.g. NMD)
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val1, val2 = string.split(functional_attribute_temp, '->')
else:
val2, val1 = string.split(functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
elif exon in down_exon_list:
if 'ref' in functional_attribute:
new_functional_attribute = '(~)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '+' or call == '~':
new_functional_attribute = '(-)' + functional_attribute
data_tuple = new_functional_attribute, exon
elif call == '-':
new_functional_attribute = '(+)' + functional_attribute
data_tuple = new_functional_attribute, exon
if 'AA:' in functional_attribute and '?' not in functional_attribute:
functional_attribute_temp = functional_attribute[3:]
if call == '+' or call == '~':
val2, val1 = string.split(functional_attribute_temp, '->')
else:
val1, val2 = string.split(functional_attribute_temp, '->')
val1, null = string.split(val1, '(')
val2, null = string.split(val2, '(')
protein_length_list.append([val1, val2])
if x == 0 or (exclude_protein_details != 'yes'):
try:
new_functional_attribute_list.append(new_functional_attribute)
except UnboundLocalError:
print entry
print up_exon_list, down_exon_list
print exon, critical_exons
print critical_probesets, (key, affygene, critical_probesets)
for i in functional_attribute_db:
print i, functional_attribute_db[i];
kill
###remove protein sequence prediction_data
if 'sequence' not in data_tuple[0]:
if x == 0 or exclude_protein_details == 'no':
functional_attribute_list2.append(data_tuple)
###Get rid of duplicates, but maintain non-alphabetical order
new_functional_attribute_list2 = []
for entry in new_functional_attribute_list:
if entry not in new_functional_attribute_list2:
new_functional_attribute_list2.append(entry)
new_functional_attribute_list = new_functional_attribute_list2
#new_functional_attribute_list = unique.unique(new_functional_attribute_list)
#new_functional_attribute_list.sort()
for entry in new_functional_attribute_list:
if 'sequence' in entry:
new_seq_attribute_str = new_seq_attribute_str + entry + ','
else:
new_functional_attribute_str = new_functional_attribute_str + entry + ','
new_seq_attribute_str = new_seq_attribute_str[0:-1]
new_functional_attribute_str = new_functional_attribute_str[0:-1]
return new_functional_attribute_str, functional_attribute_list2, new_seq_attribute_str, protein_length_list
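### Illustrative sketch only: the direction logic applied inside format_exon_functional_attributes()
### when prefixing a functional attribute. Reference ('ref') attributes are always reported as
### neutral '(~)'; otherwise the annotation call (+, - or ~) is kept for exons in the up-regulated
### list and inverted for exons in the down-regulated list. The helper name and its reduced
### argument list are assumptions for illustration.
def sketchAttributeDirectionPrefix(functional_attribute, call, exon_is_upregulated):
    if 'ref' in functional_attribute:
        prefix = '(~)'
    elif exon_is_upregulated:
        prefix = '(+)' if call in ('+', '~') else '(-)'
    else:
        prefix = '(-)' if call in ('+', '~') else '(+)'
    return prefix + functional_attribute
### e.g. sketchAttributeDirectionPrefix('toy-attribute', '+', False) == '(-)toy-attribute': a
### feature gained with the exon is reported as lost when that exon is down-regulated.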
def grab_summary_dataset_annotations(functional_attribute_db, comparison_db, include_truncation_results_specifically):
###If a second filtering database is present, filter the 1st database based on protein length changes
fa_db = {};
cp_db = {} ###index the geneids for efficient recall in the next segment of code
for (affygene, annotation) in functional_attribute_db:
try:
fa_db[affygene].append(annotation)
except KeyError:
fa_db[affygene] = [annotation]
for (affygene, annotation) in comparison_db:
try:
cp_db[affygene].append(annotation)
except KeyError:
cp_db[affygene] = [annotation]
functional_attribute_db_exclude = {}
for affygene in fa_db:
if affygene in cp_db:
for annotation2 in cp_db[affygene]:
if ('trunc' in annotation2) or ('frag' in annotation2) or ('NMDs' in annotation2):
try:
functional_attribute_db_exclude[affygene].append(annotation2)
except KeyError:
functional_attribute_db_exclude[affygene] = [annotation2]
functional_annotation_db = {}
for (affygene, annotation) in functional_attribute_db:
### if we wish to filter the 1st database based on protein length changes
if affygene not in functional_attribute_db_exclude:
try:
functional_annotation_db[annotation] += 1
except KeyError:
functional_annotation_db[annotation] = 1
elif include_truncation_results_specifically == 'yes':
for annotation_val in functional_attribute_db_exclude[affygene]:
try:
functional_annotation_db[annotation_val] += 1
except KeyError:
functional_annotation_db[annotation_val] = 1
annotation_list = []
annotation_list_ranked = []
for annotation in functional_annotation_db:
if 'micro' not in annotation:
count = functional_annotation_db[annotation]
annotation_list.append((annotation, count))
annotation_list_ranked.append((count, annotation))
annotation_list_ranked.sort();
annotation_list_ranked.reverse()
return annotation_list, annotation_list_ranked
def reorganize_attribute_entries(attribute_db1, build_attribute_direction_databases):
attribute_db2 = {};
inclusion_attributes_hit_count = {};
exclusion_attributes_hit_count = {}
genes_with_inclusion_attributes = {};
genes_with_exclusion_attributes = {};
###This database has unique gene, attribute information. No attribute will now be represented more than once per gene
for key in attribute_db1:
###Make the gene the key; the values are the attribute (functional element or protein information) along with the associated exons
affygene = key[0];
exon_attribute = key[1];
exon_list = attribute_db1[key]
exon_list = unique.unique(exon_list);
exon_list.sort()
attribute_exon_info = exon_attribute, exon_list #e.g. 5'UTR, [E1,E2,E3]
try:
attribute_db2[affygene].append(attribute_exon_info)
except KeyError:
attribute_db2[affygene] = [attribute_exon_info]
###Separate out attribute data by direction for over-representation analysis
if build_attribute_direction_databases == 'yes':
direction = exon_attribute[1:2];
unique_gene_attribute = exon_attribute[3:]
if direction == '+':
try:
inclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError:
inclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_inclusion_attributes[affygene] = []
if direction == '-':
try:
exclusion_attributes_hit_count[unique_gene_attribute].append(affygene)
except KeyError:
exclusion_attributes_hit_count[unique_gene_attribute] = [affygene]
genes_with_exclusion_attributes[affygene] = []
inclusion_attributes_hit_count = eliminate_redundant_dict_values(inclusion_attributes_hit_count)
exclusion_attributes_hit_count = eliminate_redundant_dict_values(exclusion_attributes_hit_count)
"""for key in inclusion_attributes_hit_count:
inclusion_attributes_hit_count[key] = len(inclusion_attributes_hit_count[key])
for key in exclusion_attributes_hit_count:
exclusion_attributes_hit_count[key] = len(exclusion_attributes_hit_count[key])"""
if build_attribute_direction_databases == 'yes':
return attribute_db2, inclusion_attributes_hit_count, genes_with_inclusion_attributes, exclusion_attributes_hit_count, genes_with_exclusion_attributes
else:
return attribute_db2
########### Misc. Functions ###########
def eliminate_redundant_dict_values(database):
db1 = {}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
def add_a_space(string):
if len(string) < 1:
string = ' '
return string
def convertToLog2(data_list):
return map(lambda x: math.log(float(x), 2), data_list)
def addGlobalFudgeFactor(data_list, data_type):
new_list = []
if data_type == 'log':
for item in data_list:
new_item = statistics.log_fold_conversion_fraction(item)
new_list.append(float(new_item) + global_addition_factor)
new_list = convertToLog2(new_list)
else:
for item in data_list: new_list.append(float(item) + global_addition_factor)
return new_list
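### Illustrative sketch only: the intent of addGlobalFudgeFactor() for log2 expression values,
### assuming the un-log step is simply 2**x (the real code delegates that step to
### statistics.log_fold_conversion_fraction, which may treat values differently). Adding a
### constant on the non-log scale damps ratios computed from low-intensity measurements.
def sketchAddFudgeFactorLog2(log2_values, addition_factor):
    import math
    return [math.log(2 ** float(value) + addition_factor, 2) for value in log2_values]
### e.g. sketchAddFudgeFactorLog2([1.0, 4.0], 2) -> [2.0, ~4.17]: the low value (2 -> 4 on the raw
### scale) shifts far more than the high value (16 -> 18).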
def copyDirectoryPDFs(root_dir, AS='AS'):
directories = ['AltResults/AlternativeOutputDirectoryDescription.pdf',
'AltResultsDirectoryDescription.pdf',
'ClusteringDirectoryDescription.pdf',
'ExpressionInputDirectoryDescription.pdf',
'ExpressionOutputDirectoryDescription.pdf',
'GO-Elite/GO-Elite_resultsDirectoryDescription.pdf',
'GO-EliteDirectoryDescription.pdf',
'RootDirectoryDescription.pdf']
import shutil
for dir in directories:
file = string.split(dir, '/')[-1]
proceed = True
if 'AltResult' in dir and AS != 'AS': proceed = False
if proceed:
try:
shutil.copyfile(filepath('Documentation/DirectoryDescription/' + file), filepath(root_dir + dir))
except Exception:
pass
def restrictProbesets(dataset_name):
### Take a file with probesets and only perform the splicing-analysis on these (e.g. those already identified from a previous run with a specific pattern)
### Allows for a proper denominator when calculating z-scores for microRNA and protein-domain ORA
probeset_list_filename = import_dir = '/AltDatabaseNoVersion/filtering';
filtered_probeset_db = {}
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
try:
dir_list = read_directory(import_dir)
fn_dir = filepath(import_dir[1:])
except Exception:
dir_list = []; fn_dir = ''
if len(dir_list) > 0:
for file in dir_list:
if file[:-4] in dataset_name:
fn = fn_dir + '/' + file;
fn = string.replace(fn, 'AltDatabase', 'AltDatabaseNoVersion')
filtered_probeset_db = importGeneric(fn)
print len(filtered_probeset_db), id_name, "will be used to restrict analysis..."
return filtered_probeset_db
def RunAltAnalyze():
#print altanalyze_files
#print '!!!!!starting to run alt-exon analysis'
#returnLargeGlobalVars()
global annotate_db;
annotate_db = {};
global splice_event_list;
splice_event_list = [];
residuals_dirlist = []
global dataset_name;
global constitutive_probeset_db;
global exon_db;
dir_list2 = [];
import_dir2 = ''
if array_type == 'AltMouse':
import_dir = root_dir + 'AltExpression/' + array_type
elif array_type == 'exon':
import_dir = root_dir + 'AltExpression/ExonArray/' + species + '/'
elif array_type == 'gene':
import_dir = root_dir + 'AltExpression/GeneArray/' + species + '/'
elif array_type == 'junction':
import_dir = root_dir + 'AltExpression/JunctionArray/' + species + '/'
else:
import_dir = root_dir + 'AltExpression/' + array_type + '/' + species + '/'
#if analysis_method == 'ASPIRE' or analysis_method == 'linearregres' or analysis_method == 'splicing-index':
if array_type != 'AltMouse':
gene_annotation_file = "AltDatabase/ensembl/" + species + "/" + species + "_Ensembl-annotations.txt"
else:
gene_annotation_file = "AltDatabase/" + species + "/" + array_type + "/" + array_type + "_gene_annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file, array_type)
###Import probe-level associations
exon_db = {};
filtered_arrayids = {};
filter_status = 'no'
try:
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
except IOError:
print_out = 'The annotation database: \n' + probeset_annotations_file + '\nwas not found. Ensure this file was not deleted and that the correct species has been selected.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
run = 0
### Occurs when analyzing multiple conditions rather than performing a simple pair-wise comparison
if run_from_scratch == 'Annotate External Results':
import_dir = root_dir
elif analyze_all_conditions == 'all groups':
import_dir = string.replace(import_dir, 'AltExpression', 'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir = string.replace(import_dir, 'FullDatasets/AltMouse', 'FullDatasets/AltMouse/Mm')
elif analyze_all_conditions == 'both':
import_dir2 = string.replace(import_dir, 'AltExpression', 'AltExpression/FullDatasets')
if array_type == 'AltMouse':
import_dir2 = string.replace(import_dir2, 'FullDatasets/AltMouse', 'FullDatasets/AltMouse/Mm')
try:
dir_list2 = read_directory(
import_dir2) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon':
array_type_dir = 'ExonArray'
elif array_type == 'gene':
array_type_dir = 'GeneArray'
elif array_type == 'junction':
array_type_dir = 'JunctionArray'
else:
array_type_dir = array_type
import_dir2 = string.replace(import_dir2, 'AltExpression/' + array_type_dir + '/' + species + '/', '')
import_dir2 = string.replace(import_dir2, 'AltExpression/' + array_type_dir + '/', '');
dir_list2 = read_directory(import_dir2)
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory:' + import_dir2
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
print traceback.format_exc()
badExit()
try:
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
except Exception:
try:
if array_type == 'exon':
array_type_dir = 'ExonArray'
elif array_type == 'gene':
array_type_dir = 'GeneArray'
elif array_type == 'junction':
array_type_dir = 'JunctionArray'
else:
array_type_dir = array_type
import_dir = string.replace(import_dir, 'AltExpression/' + array_type_dir + '/' + species + '/', '')
import_dir = string.replace(import_dir, 'AltExpression/' + array_type_dir + '/', '');
try:
dir_list = read_directory(import_dir)
except Exception:
import_dir = root_dir
dir_list = read_directory(
root_dir) ### Occurs when reading in an AltAnalyze filtered file under certain conditions
except Exception:
print_out = 'The expression files were not found. Please make\nsure you selected the correct species and array type.\n\nselected species: ' + species + '\nselected array type: ' + array_type + '\nselected directory:' + import_dir
try:
UI.WarningWindow(print_out, 'Exit')
except Exception:
print print_out
print traceback.format_exc()
badExit()
dir_list += dir_list2
### Capture the corresponding files in the residual dir to make sure these files exist for all comparisons - won't if FIRMA was run on some files
if analysis_method == 'FIRMA':
try:
residual_dir = root_dir + 'AltExpression/FIRMA/residuals/' + array_type + '/' + species + '/'
residuals_dirlist = read_directory(residual_dir)
except Exception:
null = []
try:
residual_dir = root_dir + 'AltExpression/FIRMA/FullDatasets/' + array_type + '/' + species + '/'
residuals_dirlist += read_directory(residual_dir)
except Exception:
null = []
dir_list_verified = []
for file in residuals_dirlist:
for filename in dir_list:
if file[:-4] in filename: dir_list_verified.append(filename)
dir_list = unique.unique(dir_list_verified)
junction_biotype = 'no'
if array_type == 'RNASeq':
### Check to see if user data includes junctions or just exons
for probeset in exon_db:
if '-' in probeset: junction_biotype = 'yes'; break
if junction_biotype == 'no' and analysis_method != 'splicing-index' and array_type == 'RNASeq':
dir_list = [] ### DON'T RUN ALTANALYZE WHEN JUST ANALYZING EXON DATA
print 'No junction data to summarize... proceeding with exon analysis\n'
elif len(dir_list) == 0:
print_out = 'No expression files available in the input directory:\n' + root_dir
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
dir_list = filterAltExpressionFiles(dir_list,
altanalyze_files) ### Looks to see if the AltExpression files are for this run or from an older run
for altanalyze_input in dir_list: #loop through each file in the directory to output results
###Import probe-level associations
if 'cel_files' in altanalyze_input:
print_out = 'The AltExpression directory containing the necessary import file(s) is missing. Please verify the correct parameters and input directory were selected. If this error persists, contact us.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
if run > 0: ### Only re-set these databases after the run when batch analysing multiple files
exon_db = {};
filtered_arrayids = {};
filter_status = 'no' ###Use this as a means to save memory (import multiple times - only storing different types of relevant information)
constitutive_probeset_db, exon_db, genes_being_analyzed = importSplicingAnnotationDatabase(
probeset_annotations_file, array_type, filtered_arrayids, filter_status)
if altanalyze_input in dir_list2:
dataset_dir = import_dir2 + '/' + altanalyze_input ### Then not a pairwise comparison
else:
dataset_dir = import_dir + '/' + altanalyze_input
dataset_name = altanalyze_input[:-4] + '-'
print "Beginning to process", dataset_name[0:-1]
### If the user wants to restrict the analysis to preselected probesets (e.g., limma or FIRMA analysis selected)
global filtered_probeset_db;
filtered_probeset_db = {}
try:
filtered_probeset_db = restrictProbesets(dataset_name)
except Exception:
null = []
if run_from_scratch != 'Annotate External Results':
###Import expression data and stats and filter the expression data based on fold and p-value OR expression threshold
try:
conditions, adj_fold_dbase, nonlog_NI_db, dataset_name, gene_expression_diff_db, midas_db, ex_db, si_db = performExpressionAnalysis(
dataset_dir, constitutive_probeset_db, exon_db, annotate_db, dataset_name)
except IOError:
#except Exception,exception:
#print exception
print traceback.format_exc()
print_out = 'The AltAnalyze filtered expression file "' + dataset_name + '" is not properly formatted. Review formatting requirements if this file was created by another application.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
conditions = 0;
adj_fold_dbase = {};
nonlog_NI_db = {};
gene_expression_diff_db = {};
ex_db = {};
si_db = {}
defineEmptyExpressionVars(exon_db);
adj_fold_dbase = original_fold_dbase
###Run Analysis
summary_results_db, summary_results_db2, aspire_output, aspire_output_gene, number_events_analyzed = splicingAnalysisAlgorithms(
nonlog_NI_db, adj_fold_dbase, dataset_name, gene_expression_diff_db, exon_db, ex_db, si_db, dataset_dir)
aspire_output_list.append(aspire_output);
aspire_output_gene_list.append(aspire_output_gene)
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(
constitutive_probeset_db); constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db);last_exon_region_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(adj_fold_dbase);adj_fold_dbase = []; clearObjectsFromMemory(
nonlog_NI_db);nonlog_NI_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(gene_expression_diff_db);gene_expression_diff_db = []; clearObjectsFromMemory(
midas_db);midas_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(ex_db);ex_db = []; clearObjectsFromMemory(si_db);si_db = []
except Exception:
null = []
try:
run += 1
except Exception:
run = 1
if run > 0: ###run = 0 if no filtered expression data present
try:
return summary_results_db, aspire_output_gene_list, number_events_analyzed
except Exception:
print_out = 'AltAnalyze was unable to find an expression dataset to analyze in:\n' + import_dir + '\nor\n' + import_dir2 + '\nPlease re-run and select a valid input directory.'
try:
UI.WarningWindow(print_out, 'Exit'); print print_out
except Exception:
print print_out
badExit()
else:
try:
clearObjectsFromMemory(exon_db); clearObjectsFromMemory(
constitutive_probeset_db); constitutive_probeset_db = []
except Exception:
null = []
try:
clearObjectsFromMemory(last_exon_region_db);last_exon_region_db = []
except Exception:
null = []
return None
def filterAltExpressionFiles(dir_list, current_files):
dir_list2 = []
try:
if len(current_files) == 0: current_files = dir_list ###if no filenames input
for altanalyze_input in dir_list: #loop through each file in the directory to output results
if altanalyze_input in current_files:
dir_list2.append(altanalyze_input)
dir_list = dir_list2
except Exception:
dir_list = dir_list
return dir_list
def defineEmptyExpressionVars(exon_db):
global fold_dbase;
fold_dbase = {};
global original_fold_dbase;
global critical_exon_db;
critical_exon_db = {}
global midas_db;
midas_db = {};
global max_replicates;
global equal_replicates;
max_replicates = 0;
equal_replicates = 0
for probeset in exon_db: fold_dbase[probeset] = '', ''
original_fold_dbase = fold_dbase
def universalPrintFunction(print_items):
log_report = open(log_file, 'a')
for item in print_items:
if commandLineMode == 'no': ### Command-line has its own log file write method (Logger)
log_report.write(item + '\n')
else:
print item
log_report.close()
class StatusWindow:
def __init__(self, root, expr_var, alt_var, goelite_var, additional_var, exp_file_location_db):
root.title('AltAnalyze version 2.0.9.3 beta')
statusVar = StringVar() ### Class method for Tkinter. Description: "Value holder for strings variables."
self.root = root
height = 450;
width = 500
if os.name != 'nt': height = 500; width = 600
self.sf = PmwFreeze.ScrolledFrame(root,
labelpos='n', label_text='Results Status Window',
usehullsize=1, hull_width=width, hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Output')
group.pack(fill='both', expand=1, padx=10, pady=0)
Label(group.interior(), width=190, height=552, justify=LEFT, bg='black', fg='white', anchor=NW, padx=5, pady=5,
textvariable=statusVar).pack(fill=X, expand=Y)
status = StringVarFile(statusVar, root) ### Likely captures the stdout
sys.stdout = status
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
fl.setSTDOUT(sys.stdout)
root.after(100, AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root))
try:
root.protocol("WM_DELETE_WINDOW", self.deleteWindow)
root.mainloop()
except Exception:
pass
def deleteWindow(self):
try:
self.root.destroy()
except Exception:
pass
def quit(self):
try:
self.root.quit()
self.root.destroy()
except Exception:
pass
sys.exit()
def exportComparisonSummary(dataset_name, summary_data_dbase, return_type):
log_report = open(log_file, 'a')
result_list = []
for key in summary_data_dbase:
if key != 'QC': ### The value is a list of strings
summary_data_dbase[key] = str(summary_data_dbase[key])
d = 'Dataset name: ' + dataset_name[:-1];
result_list.append(d + '\n')
d = summary_data_dbase['gene_assayed'] + ':\tAll genes examined';
result_list.append(d)
d = summary_data_dbase['denominator_exp_genes'] + ':\tExpressed genes examined for AS';
result_list.append(d)
if explicit_data_type == 'exon-only':
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated probesets';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed probesets examined';
result_list.append(d)
elif (array_type == 'AltMouse' or array_type == 'junction' or array_type == 'RNASeq') and (
explicit_data_type == 'null' or return_type == 'print'):
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated junction-pairs';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed junction-pairs examined';
result_list.append(d)
else:
d = summary_data_dbase['alt_events'] + ':\tAlternatively regulated probesets';
result_list.append(d)
d = summary_data_dbase['denominator_exp_events'] + ':\tExpressed probesets examined';
result_list.append(d)
d = summary_data_dbase['alt_genes'] + ':\tAlternatively regulated genes (ARGs)';
result_list.append(d)
d = summary_data_dbase['direct_domain_genes'] + ':\tARGs - overlapping with domain/motifs';
result_list.append(d)
d = summary_data_dbase['miRNA_gene_hits'] + ':\tARGs - overlapping with microRNA binding sites';
result_list.append(d)
result_list2 = []
for d in result_list:
if explicit_data_type == 'exon-only':
d = string.replace(d, 'probeset', 'exon')
elif array_type == 'RNASeq':
d = string.replace(d, 'probeset', 'junction')
result_list2.append(d)
result_list = result_list2
if return_type == 'log':
for d in result_list: log_report.write(d + '\n')
log_report.write('\n')
log_report.close()
return result_list
class SummaryResultsWindow:
def __init__(self, tl, analysis_type, output_dir, dataset_name, output_type, summary_data_dbase):
def showLink(event):
try:
idx = int(event.widget.tag_names(CURRENT)[1]) ### This is just the index provided below (e.g., str(0))
#print [self.LINKS[idx]]
if 'http://' in self.LINKS[idx]:
webbrowser.open(self.LINKS[idx])
elif self.LINKS[idx][-1] == '/':
self.openSuppliedDirectory(self.LINKS[idx])
else:
### Instead of using this option to open a hyperlink (which is what it should do), we can open another Tk window
try:
self.viewPNGFile(self.LINKS[idx]) ### ImageTK PNG viewer
except Exception:
try:
self.ShowImageMPL(self.LINKS[idx]) ### MatPlotLib-based display
except Exception:
self.openPNGImage(self.LINKS[idx]) ### Native OS PNG viewer
#self.DisplayPlots(self.LINKS[idx]) ### GIF-based display
except Exception:
null = [] ### anomalous error
self.emergency_exit = False
self.LINKS = []
self.tl = tl
self.tl.title('AltAnalyze version 2.0.9 beta')
self.analysis_type = analysis_type
filename = 'Config/icon.gif'
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(tl);
can.pack(side='top');
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
use_scroll = 'yes'
try:
runGOElite = run_GOElite
except Exception:
runGOElite = 'decide_later'
if 'QC' in summary_data_dbase:
graphic_links = summary_data_dbase['QC'] ### contains hyperlinks to QC and Clustering plots
if len(graphic_links) == 0: del summary_data_dbase['QC'] ### This can be added if an analysis fails
else:
graphic_links = []
label_text_str = 'AltAnalyze Result Summary';
height = 150;
width = 500
if analysis_type == 'AS' or 'QC' in summary_data_dbase: height = 330
if analysis_type == 'AS' and 'QC' in summary_data_dbase: height = 330
self.sf = PmwFreeze.ScrolledFrame(tl,
labelpos='n', label_text=label_text_str,
usehullsize=1, hull_width=width, hull_height=height)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
txt = Text(self.frame, bg='gray', width=150, height=80)
txt.pack(expand=True, fill="both")
#txt.insert(END, 'Primary Analysis Finished....\n')
txt.insert(END, 'Results saved to:\n' + output_dir + '\n')
f = Font(family="System", size=12, weight="bold")
txt.tag_config("font", font=f)
i = 0
copyDirectoryPDFs(output_dir, AS=analysis_type)
if analysis_type == 'AS':
txt.insert(END, '\n')
result_list = exportComparisonSummary(dataset_name, summary_data_dbase, 'print')
for d in result_list: txt.insert(END, d + '\n')
if 'QC' in summary_data_dbase and len(graphic_links) > 0:
txt.insert(END, '\nQC and Expression Clustering Plots', "font")
txt.insert(END, '\n\n 1) ')
for (name, file_dir) in graphic_links:
txt.insert(END, name, ('link', str(i)))
if len(graphic_links) > (i + 1):
txt.insert(END, '\n %s) ' % str(i + 2))
self.LINKS.append(file_dir)
i += 1
txt.insert(END, '\n\nView all primary plots in the folder ')
txt.insert(END, 'DataPlots', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'DataPlots/')
else:
url = 'http://code.google.com/p/altanalyze/'
self.LINKS = (url, '')
txt.insert(END, '\nFor more information see the ')
txt.insert(END, "AltAnalyze Online Help", ('link', str(0)))
txt.insert(END, '\n\n')
if runGOElite == 'run-immediately':
txt.insert(END, '\n\nView all pathway enrichment results in the folder ')
txt.insert(END, 'GO-Elite', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'GO-Elite/')
if analysis_type == 'AS':
txt.insert(END, '\n\nView all splicing plots in the folder ')
txt.insert(END, 'ExonPlots', ('link', str(i)));
i += 1
self.LINKS.append(output_dir + 'ExonPlots/')
txt.tag_config('link', foreground="blue", underline=1)
txt.tag_bind('link', '<Button-1>', showLink)
txt.insert(END, '\n\n')
open_results_folder = Button(tl, text='Results Folder', command=self.openDirectory)
open_results_folder.pack(side='left', padx=5, pady=5);
if analysis_type == 'AS':
#self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
self.dg_url = 'http://www.altanalyze.org/domaingraph.htm'
dg_pdf_file = 'Documentation/domain_graph.pdf';
dg_pdf_file = filepath(dg_pdf_file);
self.dg_pdf_file = dg_pdf_file
text_button = Button(tl, text='Start DomainGraph in Cytoscape', command=self.SelectCytoscapeTopLevel)
text_button.pack(side='right', padx=5, pady=5)
self.output_dir = output_dir + "AltResults"
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingASResults' #http://www.altanalyze.org/what_next_altexon.htm'
whatNext_pdf = 'Documentation/what_next_alt_exon.pdf';
whatNext_pdf = filepath(whatNext_pdf);
self.whatNext_pdf = whatNext_pdf
if output_type == 'parent': self.output_dir = output_dir ###Used for fake datasets
else:
if pathway_permutations == 'NA':
self.output_dir = output_dir + "ExpressionOutput"
else:
self.output_dir = output_dir
self.whatNext_url = 'http://code.google.com/p/altanalyze/wiki/AnalyzingGEResults' #'http://www.altanalyze.org/what_next_expression.htm'
whatNext_pdf = 'Documentation/what_next_GE.pdf';
whatNext_pdf = filepath(whatNext_pdf);
self.whatNext_pdf = whatNext_pdf
what_next = Button(tl, text='What Next?', command=self.whatNextlinkout)
what_next.pack(side='right', padx=5, pady=5)
quit_buttonTL = Button(tl, text='Close View', command=self.close)
quit_buttonTL.pack(side='right', padx=5, pady=5)
continue_to_next_win = Button(text='Continue', command=self.continue_win)
continue_to_next_win.pack(side='right', padx=10, pady=10)
quit_button = Button(root, text='Quit', command=self.quit)
quit_button.pack(side='right', padx=5, pady=5)
button_text = 'Help';
help_url = 'http://www.altanalyze.org/help_main.htm';
self.help_url = filepath(help_url)
pdf_help_file = 'Documentation/AltAnalyze-Manual.pdf';
pdf_help_file = filepath(pdf_help_file);
self.pdf_help_file = pdf_help_file
help_button = Button(root, text=button_text, command=self.Helplinkout)
help_button.pack(side='left', padx=5, pady=5)
if self.emergency_exit == False:
self.tl.protocol("WM_DELETE_WINDOW", self.tldeleteWindow)
self.tl.mainloop() ###Needed to show graphic
else:
""" This shouldn't have to be called, but is when the topLevel window isn't closed first
specifically if a PNG file is opened. the sys.exitfunc() should work but doesn't.
work on this more later """
#AltAnalyzeSetup('no')
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit(); self.tl.destroy()
except Exception:
None
try:
root.quit(); root.destroy()
except Exception:
None
UI.getUpdatedParameters(array_type, species, 'Process Expression file', output_dir)
sys.exit() ### required when opening PNG files on Windows to continue (not sure why)
#sys.exitfunc()
def tldeleteWindow(self):
try:
self.tl.quit(); self.tl.destroy()
except Exception:
self.tl.destroy()
def deleteTLWindow(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
self.tl.quit()
self.tl.destroy()
sys.exitfunc()
def deleteWindow(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit()
self.tl.destroy()
except Exception:
None
sys.exitfunc()
def continue_win(self):
self.emergency_exit = True
try:
self._tls.quit(); self._tls.destroy()
except Exception:
None
try:
self._tlx.quit(); self._tlx.destroy()
except Exception:
None
try:
self.tl.quit(); self.tl.destroy()
except Exception:
pass
root.quit()
root.destroy()
try:
self.tl.grid_forget()
except Exception:
None
try:
root.grid_forget()
except Exception:
None
sys.exitfunc()
def openDirectory(self):
if os.name == 'nt':
try:
os.startfile('"' + self.output_dir + '"')
except Exception:
os.system('open "' + self.output_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.output_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.output_dir + '/"')
def openSuppliedDirectory(self, dir):
if os.name == 'nt':
try:
os.startfile('"' + self.output_dir + '"')
except Exception:
os.system('open "' + dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + dir + '/"')
def DGlinkout(self):
try:
altanalyze_path = filepath('') ### Find AltAnalye's path
altanalyze_path = altanalyze_path[:-1]
except Exception:
null = []
if os.name == 'nt':
parent_dir = 'C:/Program Files';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt';
application_dir = 'Cytoscape_v';
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir, application_name)
except Exception:
null = []
try:
self._tls.destroy()
except Exception:
None
try: ###Remove this cytoscape as the default
file_location_defaults = UI.importDefaultFileLocations()
del file_location_defaults['CytoscapeDir']
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
null = []
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def Helplinkout(self):
self.GetHelpTopLevel(self.help_url, self.pdf_help_file)
def whatNextlinkout(self):
self.GetHelpTopLevel(self.whatNext_url, self.whatNext_pdf)
def ShowImageMPL(self, file_location):
""" Visualization method using MatPlotLib """
try:
import matplotlib
import matplotlib.pyplot as pylab
except Exception:
#print 'Graphical output mode disabled (requires matplotlib, numpy and scipy)'
None
fig = pylab.figure()
pylab.subplots_adjust(left=0.0, right=1.0, top=1.0, bottom=0.00) ### Fill the plot area left to right
ax = fig.add_subplot(111)
ax.set_xticks([]) ### Hides ticks
ax.set_yticks([])
img = pylab.imread(file_location)
imgplot = pylab.imshow(img)
pylab.show()
def viewPNGFile(self, png_file_dir):
""" View PNG file within a PMW Tkinter frame """
import ImageTk
tlx = Toplevel();
self._tlx = tlx
sf = PmwFreeze.ScrolledFrame(tlx, labelpos='n', label_text='',
usehullsize=1, hull_width=800, hull_height=550)
sf.pack(padx=0, pady=0, fill='both', expand=1)
frame = sf.interior()
tlx.title(png_file_dir)
img = ImageTk.PhotoImage(file=png_file_dir)
can = Canvas(frame)
can.pack(fill=BOTH, padx=0, pady=0)
w = img.width()
h = height = img.height()
can.config(width=w, height=h)
can.create_image(2, 2, image=img, anchor=NW)
tlx.mainloop()
def openPNGImage(self, png_file_dir):
if os.name == 'nt':
try:
os.startfile('"' + png_file_dir + '"')
except Exception:
os.system('open "' + png_file_dir + '"')
elif 'darwin' in sys.platform:
os.system('open "' + png_file_dir + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + png_file_dir + '"')
def DisplayPlots(self, file_location):
""" Native Tkinter method - Displays a gif file in a standard TopLevel window (nothing fancy) """
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('AltAnalyze Plot Visualization')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=520, hull_height=500)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text=file_location)
group.pack(fill='both', expand=1, padx=10, pady=0)
img = PhotoImage(file=filepath(file_location))
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=20);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
tls.mainloop()
def GetHelpTopLevel(self, url, pdf_file):
try:
config_db = UI.importConfigFile()
ask_for_help = config_db['help'] ### hide_selection_option
except Exception:
ask_for_help = 'null'; config_db = {}
self.pdf_file = pdf_file;
self.url = url
if ask_for_help == 'null':
message = '';
self.message = message;
self.online_help = 'Online Documentation';
self.pdf_help = 'Local PDF File'
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('Please select one of the options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=320,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/icon.gif';
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=20);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
l1 = Label(group.interior(), text=nulls);
l1.pack(side='bottom')
text_button2 = Button(group.interior(), text=self.online_help, command=self.openOnlineHelp);
text_button2.pack(side='top', padx=5, pady=5)
try:
text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(
side='top', padx=5, pady=5)
except Exception:
text_button = Button(group.interior(), text=self.pdf_help, command=self.openPDFHelp); text_button.pack(
side='top', padx=5, pady=5)
text_button3 = Button(group.interior(), text='No Thanks', command=self.skipHelp);
text_button3.pack(side='top', padx=5, pady=5)
c = Checkbutton(group.interior(), text="Apply these settings each time", command=self.setHelpConfig);
c.pack(side='bottom', padx=5, pady=0)
tls.mainloop()
try:
tls.destroy()
except Exception:
None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
help_choice = file_location_defaults['HelpChoice'].Location()
if help_choice == 'PDF':
self.openPDFHelp()
elif help_choice == 'http':
self.openOnlineHelp()
else:
self.skip()
except Exception:
self.openPDFHelp() ### Open PDF if there's a problem
def SelectCytoscapeTopLevel(self):
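        ### Prompt for which Cytoscape to launch (bundled versus previously installed) unless a saved preference exists, in which case open the stored location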
try:
config_db = UI.importConfigFile()
cytoscape_type = config_db['cytoscape'] ### hide_selection_option
except Exception:
cytoscape_type = 'null'; config_db = {}
if cytoscape_type == 'null':
message = '';
self.message = message
tls = Toplevel();
self._tls = tls;
nulls = '\t\t\t\t';
tls.title('Cytoscape Automatic Start Options')
self.sf = PmwFreeze.ScrolledFrame(self._tls,
labelpos='n', label_text='', usehullsize=1, hull_width=420,
hull_height=200)
self.sf.pack(padx=5, pady=1, fill='both', expand=1)
self.frame = self.sf.interior()
group = PmwFreeze.Group(self.sf.interior(), tag_text='Options')
group.pack(fill='both', expand=1, padx=10, pady=0)
filename = 'Config/cyto-logo-smaller.gif';
fn = filepath(filename);
img = PhotoImage(file=fn)
can = Canvas(group.interior());
can.pack(side='left', padx=10, pady=5);
can.config(width=img.width(), height=img.height())
can.create_image(2, 2, image=img, anchor=NW)
#"""
self.local_cytoscape = 'AltAnalyze Bundled Version';
self.custom_cytoscape = 'Previously Installed Version'
l1 = Label(group.interior(), text=nulls);
l1.pack(side='bottom')
l3 = Label(group.interior(), text='Select version of Cytoscape to open:');
l3.pack(side='top', pady=5)
"""
self.local_cytoscape = ' No '; self.custom_cytoscape = ' Yes '
l1 = Label(group.interior(), text=nulls); l1.pack(side = 'bottom')
            l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="red"); l2.pack(side = 'top', padx = 5, pady = 0)
"""
text_button2 = Button(group.interior(), text=self.local_cytoscape, command=self.DGlinkout);
text_button2.pack(padx=5, pady=5)
try:
text_button = Button(group.interior(), text=self.custom_cytoscape,
command=self.getPath); text_button.pack(padx=5, pady=5)
except Exception:
text_button = Button(group.interior(), text=self.custom_cytoscape,
command=self.getPath); text_button.pack(padx=5, pady=5)
            l2 = Label(group.interior(), text='Note: Cytoscape can take up to a minute to initialize', fg="blue");
l2.pack(side='bottom', padx=5, pady=0)
c = Checkbutton(group.interior(), text="Apply these settings each time and don't show again",
command=self.setCytoscapeConfig);
c.pack(side='bottom', padx=5, pady=0)
#c2 = Checkbutton(group.interior(), text = "Open PDF of DomainGraph help rather than online help", command=self.setCytoscapeConfig); c2.pack(side = 'bottom', padx = 5, pady = 0)
tls.mainloop()
try:
tls.destroy()
except Exception:
None
else:
file_location_defaults = UI.importDefaultFileLocations()
try:
cytoscape_app_dir = file_location_defaults['CytoscapeDir'].Location(); openFile(cytoscape_app_dir)
except Exception:
try:
altanalyze_path = filepath(''); altanalyze_path = altanalyze_path[:-1]
except Exception:
altanalyze_path = ''
application_dir = 'Cytoscape_v'
if os.name == 'nt':
application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
application_name = 'Cytoscape'
try:
openCytoscape(altanalyze_path, application_dir, application_name)
except Exception:
null = []
def setCytoscapeConfig(self):
config_db = {};
config_db['cytoscape'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def setHelpConfig(self):
config_db = {};
config_db['help'] = 'hide_selection_option'
UI.exportConfigFile(config_db)
def getPath(self):
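        ### Ask the user for an existing Cytoscape install directory, attempt to launch it (via the jar on Linux), and store the location as the new default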
file_location_defaults = UI.importDefaultFileLocations()
if os.name == 'nt':
parent_dir = 'C:/Program Files'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.exe'
elif 'darwin' in sys.platform:
parent_dir = '/Applications'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape.app'
elif 'linux' in sys.platform:
parent_dir = '/opt'; application_dir = 'Cytoscape_v'; application_name = 'Cytoscape'
try:
self.default_dir = file_location_defaults['CytoscapeDir'].Location()
self.default_dir = string.replace(self.default_dir, '//', '/')
self.default_dir = string.replace(self.default_dir, '\\', '/')
self.default_dir = string.join(string.split(self.default_dir, '/')[:-1], '/')
except Exception:
dir = FindDir(parent_dir, application_dir);
dir = filepath(parent_dir + '/' + dir)
self.default_dir = filepath(parent_dir)
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls, initialdir=self.default_dir)
except Exception:
self.default_dir = ''
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls, initialdir=self.default_dir)
except Exception:
try:
dirPath = tkFileDialog.askdirectory(parent=self._tls)
except Exception:
dirPath = ''
try:
#print [dirPath],application_name
app_dir = dirPath + '/' + application_name
if 'linux' in sys.platform:
try:
createCytoscapeDesktop(cytoscape_dir)
except Exception:
null = []
dir_list = unique.read_directory('/usr/bin/') ### Check to see that JAVA is installed
                if 'java' not in dir_list: print 'Java not found in "/usr/bin/". If not installed,\nplease install and re-try opening Cytoscape'
try:
jar_path = dirPath + '/cytoscape.jar'
main_path = dirPath + '/cytoscape.CyMain'
plugins_path = dirPath + '/plugins'
os.system(
'java -Dswing.aatext=true -Xss5M -Xmx512M -jar ' + jar_path + ' ' + main_path + ' -p ' + plugins_path + ' &')
print 'Cytoscape jar opened:', jar_path
except Exception:
print 'OS command to open Java failed.'
try:
openFile(app_dir2); print 'Cytoscape opened:', app_dir2
except Exception:
openFile(app_dir)
else:
openFile(app_dir)
try:
file_location_defaults['CytoscapeDir'].SetLocation(app_dir)
except Exception:
fl = UI.FileLocationData('', app_dir, 'all')
file_location_defaults['CytoscapeDir'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
except Exception:
null = []
try:
self._tls.destroy()
except Exception:
None
self.GetHelpTopLevel(self.dg_url, self.dg_pdf_file)
def openOnlineHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('http')
except Exception:
fl = UI.FileLocationData('', 'http', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
webbrowser.open(self.url)
#except Exception: null=[]
try:
self._tls.destroy()
except Exception:
None
def skipHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('skip')
except Exception:
fl = UI.FileLocationData('', 'skip', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
try:
self._tls.destroy()
except Exception:
None
def openPDFHelp(self):
file_location_defaults = UI.importDefaultFileLocations()
try:
file_location_defaults['HelpChoice'].SetLocation('PDF')
except Exception:
fl = UI.FileLocationData('', 'PDF', 'all')
file_location_defaults['HelpChoice'] = fl
UI.exportDefaultFileLocations(file_location_defaults)
if os.name == 'nt':
try:
os.startfile('"' + self.pdf_file + '"')
except Exception:
os.system('open "' + self.pdf_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + self.pdf_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + self.pdf_file + '"')
try:
self._tls.destroy()
except Exception:
None
def quit(self):
root.quit()
root.destroy()
sys.exit()
def close(self):
#self.tl.quit() #### This was causing multiple errors in 2.0.7 - evaluate more!
self.tl.destroy()
class StringVarFile:
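    ### File-like object that mirrors print output to a Tkinter StringVar (for the status window) while also appending it to the log file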
def __init__(self, stringVar, window):
self.__newline = 0;
self.__stringvar = stringVar;
self.__window = window
def write(self, s):
try:
log_report = open(log_file, 'a')
log_report.write(s);
log_report.close() ### Variable to record each print statement
new = self.__stringvar.get()
for c in s:
#if c == '\n': self.__newline = 1
                if c == '\k':
                    self.__newline = 1 ### '\k' is never matched, so output accumulates continuously rather than replacing a single line
else:
if self.__newline: new = ""; self.__newline = 0
new = new + c
self.set(new)
except Exception:
pass
def set(self, s):
self.__stringvar.set(s); self.__window.update()
def get(self):
return self.__stringvar.get()
def flush(self):
pass
def timestamp():
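    ### Build a date-time stamp (e.g. YYYYMMDD-HHMMSS) used to name the AltAnalyze log file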
import datetime
today = str(datetime.date.today());
today = string.split(today, '-');
today = today[0] + '' + today[1] + '' + today[2]
time_stamp = string.replace(time.ctime(), ':', '')
time_stamp = string.replace(time_stamp, ' ', ' ')
time_stamp = string.split(time_stamp, ' ') ###Use a time-stamp as the output dir (minus the day)
time_stamp = today + '-' + time_stamp[3]
return time_stamp
def callWXPython():
import wx
import AltAnalyzeViewer
app = wx.App(False)
AltAnalyzeViewer.remoteViewer(app)
def AltAnalyzeSetup(skip_intro):
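    ### Collect user parameters through the UI (or launch the standalone results viewer), create the run log file, then start the analysis via the Tk status window or directly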
global apt_location;
global root_dir;
global log_file;
global summary_data_db;
summary_data_db = {};
reload(UI)
global probability_statistic;
global commandLineMode;
commandLineMode = 'no'
if 'remoteViewer' == skip_intro:
if os.name == 'nt':
callWXPython()
elif os.name == 'ntX':
package_path = filepath('python')
win_package_path = string.replace(package_path, 'python', 'AltAnalyzeViewer.exe')
import subprocess
subprocess.call([win_package_path]);
sys.exit()
elif os.name == 'posix':
package_path = filepath('python')
#mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/python')
#os.system(mac_package_path+' RemoteViewer.py');sys.exit()
mac_package_path = string.replace(package_path, 'python',
'AltAnalyzeViewer.app/Contents/MacOS/AltAnalyzeViewer')
import subprocess
subprocess.call([mac_package_path]);
sys.exit()
"""
import threading
import wx
app = wx.PySimpleApp()
t = threading.Thread(target=callWXPython)
t.setDaemon(1)
t.start()
s = 1
queue = mlp.Queue()
proc = mlp.Process(target=callWXPython) ### passing sys.stdout unfortunately doesn't work to pass the Tk string
proc.start()
sys.exit()
"""
reload(UI)
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters(skip_intro, Multi=mlp)
"""except Exception:
if 'SystemExit' not in str(traceback.format_exc()):
expr_var, alt_var, additional_var, goelite_var, exp_file_location_db = UI.getUserParameters('yes')
else: sys.exit()"""
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
apt_location = fl.APTLocation()
root_dir = fl.RootDir()
try:
probability_statistic = fl.ProbabilityStatistic()
except Exception:
probability_statistic = 'unpaired t-test'
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
if use_Tkinter == 'yes' and debug_mode == 'no':
try:
global root;
root = Tk()
StatusWindow(root, expr_var, alt_var, goelite_var, additional_var, exp_file_location_db)
root.destroy()
except Exception, exception:
try:
print traceback.format_exc()
badExit()
except Exception:
sys.exit()
else:
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, '')
def badExit():
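    ### Report an unexpected failure: open the log file for the user when not in command-line mode, show a warning window if possible, then exit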
print "\n...exiting AltAnalyze due to unexpected error"
try:
time_stamp = timestamp()
print_out = "Unknown error encountered during data processing.\nPlease see logfile in:\n\n" + log_file + "\nand report to [email protected]."
try:
if len(log_file) > 0:
if commandLineMode == 'no':
if os.name == 'nt':
try:
os.startfile('"' + log_file + '"')
except Exception:
os.system('open "' + log_file + '"')
elif 'darwin' in sys.platform:
os.system('open "' + log_file + '"')
elif 'linux' in sys.platform:
os.system('xdg-open "' + log_file + '"')
if commandLineMode == 'no':
try:
UI.WarningWindow(print_out, 'Error Encountered!'); root.destroy()
except Exception:
print print_out
except Exception:
sys.exit()
except Exception:
sys.exit()
sys.exit()
def AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, root):
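    ### Main analysis driver: unpacks the user options, runs expression building, alternative-exon analyses, visualization and GO-Elite enrichment, then reports the summary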
### Hard-coded defaults
w = 'Agilent';
x = 'Affymetrix';
y = 'Ensembl';
z = 'any';
data_source = y;
constitutive_source = z;
manufacturer = x ### Constitutive source, is only really paid attention to if Ensembl, otherwise Affymetrix is used (even if default)
### Get default options for ExpressionBuilder and AltAnalyze
start_time = time.time()
test_goelite = 'no';
test_results_pannel = 'no'
global species;
global array_type;
global expression_data_format;
global use_R;
use_R = 'no'
global analysis_method;
global p_threshold;
global filter_probeset_types
global permute_p_threshold;
global perform_permutation_analysis;
global export_NI_values
global run_MiDAS;
global analyze_functional_attributes;
global microRNA_prediction_method
global calculate_normIntensity_p;
global pathway_permutations;
global avg_all_for_ss;
global analyze_all_conditions
global remove_intronic_junctions
global agglomerate_inclusion_probesets;
global expression_threshold;
global factor_out_expression_changes
global only_include_constitutive_containing_genes;
global remove_transcriptional_regulated_genes;
global add_exons_to_annotations
global exclude_protein_details;
global filter_for_AS;
global use_direct_domain_alignments_only;
global run_from_scratch
global explicit_data_type;
explicit_data_type = 'null'
global altanalyze_files;
altanalyze_files = []
species, array_type, manufacturer, constitutive_source, dabg_p, raw_expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis = expr_var
analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions = alt_var
calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms = additional_var
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, pathway_permutations, mod, returnPathways = goelite_var
original_remove_intronic_junctions = remove_intronic_junctions
if run_from_scratch == 'Annotate External Results': analysis_method = 'external'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset]
try:
exon_exp_threshold = fl.ExonExpThreshold()
except Exception:
exon_exp_threshold = 'NA'
try:
gene_exp_threshold = fl.GeneExpThreshold()
except Exception:
gene_exp_threshold = 'NA'
try:
exon_rpkm_threshold = fl.ExonRPKMThreshold()
except Exception:
exon_rpkm_threshold = 'NA'
try:
rpkm_threshold = fl.RPKMThreshold() ### Gene-Level
except Exception:
rpkm_threshold = 'NA'
fl.setJunctionExpThreshold(
raw_expression_threshold) ### For RNA-Seq, this specifically applies to exon-junctions
try:
predictGroups = fl.predictGroups()
except Exception:
predictGroups = False
try:
if fl.excludeLowExpressionExons():
excludeLowExpExons = 'yes'
else:
excludeLowExpExons = 'no'
except Exception:
excludeLowExpExons = 'no'
if test_goelite == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
results_dir = filepath(fl.RootDir())
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp)
global perform_element_permutation_analysis;
global permutations
perform_element_permutation_analysis = 'yes';
permutations = 2000
analyze_functional_attributes = 'yes' ### Do this by default (shouldn't substantially increase runtime)
if run_from_scratch != 'Annotate External Results' and (array_type != "3'array" and array_type != 'RNASeq'):
if run_from_scratch != 'Process AltAnalyze filtered':
try:
raw_expression_threshold = float(raw_expression_threshold)
except Exception:
raw_expression_threshold = 1
if raw_expression_threshold < 1:
raw_expression_threshold = 1
print "Expression threshold < 1, forcing to be a minimum of 1."
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 0
if dabg_p == 0 or dabg_p > 1:
print "Invalid dabg-p value threshold entered,(", dabg_p, ") setting to default of 0.05"
dabg_p = 0.05
if use_direct_domain_alignments_only == 'direct-alignment': use_direct_domain_alignments_only = 'yes'
if run_from_scratch == 'Process CEL files': expression_data_format = 'log'
print "Beginning AltAnalyze Analysis... Format:", expression_data_format
if array_type == 'RNASeq':
id_name = 'exon/junction IDs'
else:
id_name = 'array IDs'
print_items = []; #print [permute_p_threshold]; sys.exit()
print_items.append("AltAnalyze version 2.0.9 - Expression Analysis Parameters Being Used...")
print_items.append('\t' + 'database' + ': ' + unique.getCurrentGeneDatabaseVersion())
print_items.append('\t' + 'species' + ': ' + species)
print_items.append('\t' + 'method' + ': ' + array_type)
print_items.append('\t' + 'manufacturer' + ': ' + manufacturer)
print_items.append('\t' + 'probability_statistic' + ': ' + probability_statistic)
print_items.append('\t' + 'constitutive_source' + ': ' + constitutive_source)
print_items.append('\t' + 'dabg_p' + ': ' + str(dabg_p))
if array_type == 'RNASeq':
print_items.append('\t' + 'junction expression threshold' + ': ' + str(raw_expression_threshold))
print_items.append('\t' + 'exon_exp_threshold' + ': ' + str(exon_exp_threshold))
print_items.append('\t' + 'gene_exp_threshold' + ': ' + str(gene_exp_threshold))
print_items.append('\t' + 'exon_rpkm_threshold' + ': ' + str(exon_rpkm_threshold))
print_items.append('\t' + 'gene_rpkm_threshold' + ': ' + str(rpkm_threshold))
print_items.append('\t' + 'exclude low expressing exons for RPKM' + ': ' + excludeLowExpExons)
else:
print_items.append('\t' + 'raw_expression_threshold' + ': ' + str(raw_expression_threshold))
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'expression_data_format' + ': ' + expression_data_format)
print_items.append('\t' + 'include_raw_data' + ': ' + include_raw_data)
print_items.append('\t' + 'run_from_scratch' + ': ' + run_from_scratch)
print_items.append('\t' + 'perform_alt_analysis' + ': ' + perform_alt_analysis)
if avg_all_for_ss == 'yes':
cs_type = 'core'
else:
cs_type = 'constitutive'
print_items.append('\t' + 'calculate_gene_expression_using' + ': ' + cs_type)
print_items.append("Alternative Exon Analysis Parameters Being Used...")
print_items.append('\t' + 'analysis_method' + ': ' + analysis_method)
print_items.append('\t' + 'p_threshold' + ': ' + str(p_threshold))
print_items.append('\t' + 'filter_data_types' + ': ' + filter_probeset_types)
print_items.append('\t' + 'alt_exon_fold_variable' + ': ' + str(alt_exon_fold_variable))
print_items.append('\t' + 'gene_expression_cutoff' + ': ' + str(gene_expression_cutoff))
print_items.append('\t' + 'remove_intronic_junctions' + ': ' + remove_intronic_junctions)
print_items.append('\t' + 'avg_all_for_ss' + ': ' + avg_all_for_ss)
print_items.append('\t' + 'permute_p_threshold' + ': ' + str(permute_p_threshold))
print_items.append('\t' + 'perform_permutation_analysis' + ': ' + perform_permutation_analysis)
print_items.append('\t' + 'export_NI_values' + ': ' + export_NI_values)
print_items.append('\t' + 'run_MiDAS' + ': ' + run_MiDAS)
print_items.append('\t' + 'use_direct_domain_alignments_only' + ': ' + use_direct_domain_alignments_only)
print_items.append('\t' + 'microRNA_prediction_method' + ': ' + microRNA_prediction_method)
print_items.append('\t' + 'analyze_all_conditions' + ': ' + analyze_all_conditions)
print_items.append('\t' + 'filter_for_AS' + ': ' + filter_for_AS)
if pathway_permutations == 'NA':
run_GOElite = 'decide_later'
else:
run_GOElite = 'run-immediately'
print_items.append('\t' + 'run_GOElite' + ': ' + run_GOElite)
universalPrintFunction(print_items)
if commandLineMode == 'yes': print 'Running command line mode:', commandLineMode
summary_data_db['gene_assayed'] = 0
summary_data_db['denominator_exp_genes'] = 0
summary_data_db['alt_events'] = 0
summary_data_db['denominator_exp_events'] = 0
summary_data_db['alt_genes'] = 0
summary_data_db['direct_domain_genes'] = 0
summary_data_db['miRNA_gene_denom'] = 0
summary_data_db['miRNA_gene_hits'] = 0
if test_results_pannel == 'yes': ### It can be difficult to get error warnings from GO-Elite, unless run here
graphic_links = []
graphic_links.append(['test', 'Config/AltAnalyze_structure-RNASeq.jpg'])
summary_data_db['QC'] = graphic_links
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test';
results_dir = ''
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_data_db)
root.destroy();
sys.exit()
global export_go_annotations;
global aspire_output_list;
global aspire_output_gene_list
global filter_probesets_by;
global global_addition_factor;
global onlyAnalyzeJunctions
global log_fold_cutoff;
global aspire_cutoff;
global annotation_system;
global alt_exon_logfold_cutoff
"""dabg_p = 0.75; data_type = 'expression' ###used for expression analysis when dealing with AltMouse arrays
a = "3'array"; b = "exon"; c = "AltMouse"; e = "custom"; array_type = c
l = 'log'; n = 'non-log'; expression_data_format = l
hs = 'Hs'; mm = 'Mm'; dr = 'Dr'; rn = 'Rn'; species = mm
include_raw_data = 'yes'; expression_threshold = 70 ### Based on suggestion from BMC Genomics. 2006 Dec 27;7:325. PMID: 17192196, for hu-exon 1.0 st array
avg_all_for_ss = 'no' ###Default is 'no' since we don't want all probes averaged for the exon arrays"""
###### Run ExpressionBuilder ######
"""ExpressionBuilder is used to:
(1) extract out gene expression values, provide gene annotations, and calculate summary gene statistics
(2) filter probesets based DABG p-values and export to pair-wise comparison files
(3) build array annotations files matched to gene structure features (e.g. exons, introns) using chromosomal coordinates
    options 1-2 are executed in remoteExpressionBuilder and option 3 is performed by running ExonArrayEnsembl rules"""
try:
additional_algorithm = additional_algorithms.Algorithm()
additional_score = additional_algorithms.Score()
except Exception:
additional_algorithm = 'null'; additional_score = 'null'
if analysis_method == 'FIRMA':
analyze_metaprobesets = 'yes'
elif additional_algorithm == 'FIRMA':
analyze_metaprobesets = 'yes'
else:
analyze_metaprobesets = 'no'
### Check to see if this is a real or FAKE (used for demonstration purposes) dataset
if run_from_scratch == 'Process CEL files' or 'Feature Extraction' in run_from_scratch:
for dataset in exp_file_location_db:
if run_from_scratch == 'Process CEL files':
fl = exp_file_location_db[dataset]
pgf_file = fl.InputCDFFile()
results_dir = filepath(fl.RootDir())
if '_demo' in pgf_file: ### Thus we are running demo CEL files and want to quit immediately
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'AS', results_dir, dataset, 'parent', summary_data_db)
except Exception:
null = []
skip_intro = 'yes'
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
reload(UI)
UI.getUpdatedParameters(array_type, species, run_from_scratch, results_dir)
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
if 'CEL files' in run_from_scratch:
import APT
try:
try:
APT.probesetSummarize(exp_file_location_db, analyze_metaprobesets, filter_probeset_types, species,
root)
if analyze_metaprobesets == 'yes':
analyze_metaprobesets = 'no' ### Re-run the APT analysis to obtain probeset rather than gene-level results (only the residuals are needed from a metaprobeset run)
APT.probesetSummarize(exp_file_location_db, analyze_metaprobesets, filter_probeset_types,
species, root)
except Exception:
import platform
print "Trying to change APT binary access privileges"
for dataset in exp_file_location_db: ### Instance of the Class ExpressionFileLocationData
fl = exp_file_location_db[dataset];
apt_dir = fl.APTLocation()
if '/bin' in apt_dir:
apt_file = apt_dir + '/apt-probeset-summarize' ### if the user selects an APT directory
elif os.name == 'nt':
apt_file = apt_dir + '/PC/' + platform.architecture()[0] + '/apt-probeset-summarize.exe'
elif 'darwin' in sys.platform:
apt_file = apt_dir + '/Mac/apt-probeset-summarize'
elif 'linux' in sys.platform:
if '32bit' in platform.architecture():
apt_file = apt_dir + '/Linux/32bit/apt-probeset-summarize'
elif '64bit' in platform.architecture():
apt_file = apt_dir + '/Linux/64bit/apt-probeset-summarize'
apt_file = filepath(apt_file)
os.chmod(apt_file, 0777)
midas_dir = string.replace(apt_file, 'apt-probeset-summarize', 'apt-midas')
os.chmod(midas_dir, 0777)
APT.probesetSummarize(exp_file_location_db, analysis_method, filter_probeset_types, species, root)
except Exception:
                print_out = 'AltAnalyze encountered an unexpected error while running Affymetrix\n'
print_out += 'Power Tools (APT). Additional information may be found in the directory\n'
print_out += '"ExpressionInput/APT" in the output directory. You may also encounter issues\n'
                print_out += 'if you are logged into an account with restricted privileges.\n\n'
print_out += 'If this issue can not be resolved, contact AltAnalyze help or run RMA outside\n'
print_out += 'of AltAnalyze and import the results using the analysis option "expression file".\n'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
elif 'Feature Extraction' in run_from_scratch:
import ProcessAgilentArrays
try:
ProcessAgilentArrays.agilentSummarize(exp_file_location_db)
except Exception:
print_out = 'Agilent array import and processing failed... see error log for details...'
print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
reload(ProcessAgilentArrays)
if run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'buildExonExportFiles':
import RNASeq;
reload(RNASeq);
import RNASeq
for dataset in exp_file_location_db: fl = exp_file_location_db[dataset]
### The below function aligns splice-junction coordinates to Ensembl exons from BED Files and
### exports AltAnalyze specific databases that are unique to this dataset to the output directory
try:
fastq_folder = fl.RunKallisto()
except Exception:
print traceback.format_exc()
if len(fastq_folder) > 0:
try:
RNASeq.runKallisto(species, dataset, root_dir, fastq_folder, returnSampleNames=False)
biotypes = 'ran'
except Exception:
biotypes = 'failed'
else:
analyzeBAMs = False;
bedFilesPresent = False
dir_list = unique.read_directory(fl.BEDFileDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if '.bed' in string.lower(file):
bedFilesPresent = True
if analyzeBAMs and bedFilesPresent == False:
import multiBAMtoBED
bam_dir = fl.BEDFileDir()
refExonCoordinateFile = filepath('AltDatabase/ensembl/' + species + '/' + species + '_Ensembl_exon.txt')
                outputExonCoordinateRefBEDfile = bam_dir + '/BedRef/' + species + '_' + string.replace(dataset, 'exp.', '')
analysisType = ['exon', 'junction', 'reference']
#analysisType = ['junction']
multiBAMtoBED.parallelBAMProcessing(bam_dir, refExonCoordinateFile, outputExonCoordinateRefBEDfile,
analysisType=analysisType, useMultiProcessing=fl.multiThreading(),
MLP=mlp, root=root)
biotypes = RNASeq.alignExonsAndJunctionsToEnsembl(species, exp_file_location_db, dataset, Multi=mlp)
if biotypes == 'failed':
print_out = 'No valid chromosomal positions in the input BED or BioScope files. Exiting AltAnalyze.'
#print traceback.format_exc()
try:
UI.WarningWindow(print_out, 'Exit')
root.destroy();
sys.exit()
except Exception:
print print_out;
sys.exit()
#print '!!!!!back inside AltAnalyze'
#returnLargeGlobalVars()
reload(RNASeq)
#print '!!!!!again'
#returnLargeGlobalVars()
if root_dir in biotypes:
print_out = 'Exon-level BED coordinate predictions exported to:\n' + biotypes
print_out += '\n\nAfter obtaining exon expression estimates, rename exon BED files to\n'
print_out += 'match the junction name (e.g., Sample1__exon.bed and Sample1__junction.bed)\n'
print_out += 'and re-run AltAnalyze (see tutorials at http://altanalyze.org for help).'
UI.InfoWindow(print_out, 'Export Complete')
try:
root.destroy(); sys.exit()
except Exception:
sys.exit()
if predictGroups == True:
expFile = fl.ExpFile()
if array_type == 'RNASeq':
exp_threshold = 100;
rpkm_threshold = 10
else:
exp_threshold = 200;
rpkm_threshold = 8
RNASeq.singleCellRNASeqWorkflow(species, array_type, expFile, mlp, exp_threshold=exp_threshold,
rpkm_threshold=rpkm_threshold)
goelite_run = False
if run_from_scratch == 'Process Expression file' or run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or 'Feature Extraction' in run_from_scratch:
if fl.NormMatrix() == 'quantile' and 'Feature Extraction' not in run_from_scratch:
import NormalizeDataset
try:
NormalizeDataset.normalizeDataset(fl.ExpFile())
except Exception:
print "Normalization failed for unknown reasons..."
#"""
status = ExpressionBuilder.remoteExpressionBuilder(species, array_type,
dabg_p, raw_expression_threshold, avg_all_for_ss,
expression_data_format,
manufacturer, constitutive_source, data_source,
include_raw_data,
perform_alt_analysis, ge_fold_cutoffs, ge_pvalue_cutoffs,
ge_ptype,
exp_file_location_db, root)
reload(ExpressionBuilder) ### Clears Memory
#"""
graphics = []
if fl.MarkerFinder() == 'yes':
            ### Identify putative condition-specific marker genes
import markerFinder
fl.setOutputDir(root_dir) ### This needs to be set here
exp_file = fl.ExpFile()
if array_type != "3'array": exp_file = string.replace(exp_file, '.txt', '-steady-state.txt')
markerFinder_inputs = [exp_file, fl.DatasetFile()] ### Output a replicate and non-replicate version
markerFinder_inputs = [exp_file] ### Only considers the replicate and not mean analysis (recommended)
for input_exp_file in markerFinder_inputs:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,
array_type) ### Either way, make an average annotated file from the DATASET file
except Exception:
print "Unknown MarkerFinder failure (possible filename issue or data incompatibility)..."
print traceback.format_exc()
continue
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'DATASET', 'AVERAGE')
else:
group_exp_file = (input_exp_file, output_dir) ### still analyze the primary sample
compendiumType = 'protein_coding'
if expression_data_format == 'non-log':
logTransform = True
else:
logTransform = False
try:
markerFinder.analyzeData(group_exp_file, species, array_type, compendiumType,
AdditionalParameters=fl, logTransform=logTransform)
except Exception:
None
### Generate heatmaps (unclustered - order by markerFinder)
try:
graphics = markerFinder.generateMarkerHeatMaps(fl, array_type, graphics=graphics)
except Exception:
print traceback.format_exc()
remove_intronic_junctions = original_remove_intronic_junctions ### This var gets reset when running FilterDABG
try:
            summary_data_db['QC'] = fl.GraphicLinks() + graphics ### provides links for displaying QC and clustering plots
except Exception:
null = [] ### Visualization support through matplotlib either not present or visualization options excluded
#print '!!!!!finished expression builder'
#returnLargeGlobalVars()
expression_data_format = 'log' ### This variable is set from non-log in FilterDABG when present (version 1.16)
try:
parent_dir = fl.RootDir() + '/GO-Elite/regulated/'
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
inputType = 'IDs'
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets']
output_dir = parent_dir
degrees = 'direct'
input_exp_file = input_file_dir
gsp = UI.GeneSelectionParameters(species, array_type, manufacturer)
gsp.setGeneSet('None Selected')
gsp.setPathwaySelect('')
gsp.setGeneSelection('')
gsp.setOntologyID('')
gsp.setIncludeExpIDs(True)
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees, input_exp_file, gsp,
'')
except Exception:
print traceback.format_exc()
if status == 'stop':
### See if the array and species are compatible with GO-Elite analysis
system_codes = UI.getSystemInfo()
go_elite_analysis_supported = 'yes'
species_names = UI.getSpeciesInfo()
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
try:
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
try:
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp); goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
except Exception:
pass
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. Gene expression\nsummary exported to "ExpressionOutput".'
try:
if use_Tkinter == 'yes':
print "Analysis Complete\n";
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_data_db)
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
if go_elite_analysis_supported == 'yes':
UI.getUpdatedParameters(array_type, species, run_from_scratch, file_dirs)
try:
AltAnalyzeSetup('no')
except Exception:
print traceback.format_exc()
sys.exit()
else:
print '\n' + print_out; sys.exit()
except Exception:
#print 'Failed to report status through GUI.'
sys.exit()
else:
altanalyze_files = status[1] ### These files are the comparison files to analyze
elif run_from_scratch == 'update DBs':
null = [] ###Add link to new module here (possibly)
#updateDBs(species,array_type)
sys.exit()
if perform_alt_analysis != 'expression': ###Thus perform_alt_analysis = 'both' or 'alt' (default when skipping expression summary step)
###### Run AltAnalyze ######
global dataset_name;
global summary_results_db;
global summary_results_db2
summary_results_db = {};
summary_results_db2 = {};
aspire_output_list = [];
aspire_output_gene_list = []
onlyAnalyzeJunctions = 'no';
agglomerate_inclusion_probesets = 'no';
filter_probesets_by = 'NA'
if array_type == 'AltMouse' or (
(array_type == 'junction' or array_type == 'RNASeq') and explicit_data_type == 'null'):
if filter_probeset_types == 'junctions-only':
onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'combined-junctions':
agglomerate_inclusion_probesets = 'yes'; onlyAnalyzeJunctions = 'yes'
elif filter_probeset_types == 'exons-only':
analysis_method = 'splicing-index'; filter_probesets_by = 'exon'
if filter_probeset_types == 'combined-junctions' and array_type == 'junction' or array_type == 'RNASeq': filter_probesets_by = 'all'
else:
filter_probesets_by = filter_probeset_types
c = 'Ensembl';
d = 'Entrez Gene'
annotation_system = c
expression_threshold = 0 ###This is different than the raw_expression_threshold (probably shouldn't filter so set to 0)
if analysis_method == 'linearregres-rlm': analysis_method = 'linearregres';use_R = 'yes'
if gene_expression_cutoff < 1:
gene_expression_cutoff = 2 ### A number less than one is invalid
print "WARNING!!!! Invalid gene expression fold cutoff entered,\nusing the default value of 2, must be greater than 1."
log_fold_cutoff = math.log(float(gene_expression_cutoff), 2)
if analysis_method != 'ASPIRE' and analysis_method != 'none':
if p_threshold <= 0 or p_threshold > 1:
p_threshold = 0.05 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon p-value threshold entered,\nusing the default value of 0.05."
if alt_exon_fold_variable < 1:
alt_exon_fold_variable = 1 ### A number less than one is invalid
print "WARNING!!!! Invalid alternative exon fold cutoff entered,\nusing the default value of 2, must be greater than 1."
try:
alt_exon_logfold_cutoff = math.log(float(alt_exon_fold_variable), 2)
except Exception:
alt_exon_logfold_cutoff = 1
else:
alt_exon_logfold_cutoff = float(alt_exon_fold_variable)
global_addition_factor = 0
export_junction_comparisons = 'no' ### No longer accessed in this module - only in update mode through a different module
factor_out_expression_changes = 'yes' ### Use 'no' if data is normalized already or no expression normalization for ASPIRE desired
only_include_constitutive_containing_genes = 'yes'
remove_transcriptional_regulated_genes = 'yes'
add_exons_to_annotations = 'no'
exclude_protein_details = 'no'
if analysis_method == 'ASPIRE' or 'linearregres' in analysis_method: annotation_system = d
if 'linear' in analysis_method: analysis_method = 'linearregres'
if 'aspire' in analysis_method: analysis_method = 'ASPIRE'
if array_type == 'AltMouse': species = 'Mm'
#if export_NI_values == 'yes': remove_transcriptional_regulated_genes = 'no'
###Saves run-time while testing the software (global variable stored)
#import_dir = '/AltDatabase/affymetrix/'+species
#dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
### Get Ensembl-GO and pathway annotations from GO-Elite files
universalPrintFunction(["Importing GO-Elite pathway/GO annotations"])
global go_annotations;
go_annotations = {}
import BuildAffymetrixAssociations
go_annotations = BuildAffymetrixAssociations.getEnsemblAnnotationsFromGOElite(species)
global probeset_annotations_file
if array_type == 'RNASeq':
probeset_annotations_file = root_dir + 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_junctions.txt'
elif array_type == 'AltMouse':
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + 'MASTER-probeset-transcript.txt'
else:
probeset_annotations_file = 'AltDatabase/' + species + '/' + array_type + '/' + species + '_Ensembl_probesets.txt'
#"""
if analysis_method != 'none':
analysis_summary = RunAltAnalyze() ### Only run if analysis methods is specified (only available for RNA-Seq and junction analyses)
else:
analysis_summary = None
if analysis_summary != None:
summary_results_db, aspire_output_gene_list, number_events_analyzed = analysis_summary
summary_data_db2 = copy.deepcopy(summary_data_db)
            for i in summary_data_db2: del summary_data_db[i] ### If we reset the variable it violates its global declaration... do this instead
#universalPrintFunction(['Alternative Exon Results for Junction Comparisons:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
exportSummaryResults(summary_results_db, analysis_method, aspire_output_list, aspire_output_gene_list,
annotate_db, array_type, number_events_analyzed, root_dir)
else:
### Occurs for RNASeq when no junctions are present
summary_data_db2 = {}
if array_type == 'junction' or array_type == 'RNASeq':
            #Reanalyze junction array data separately for individual probesets rather than reciprocal junctions
if array_type == 'junction':
explicit_data_type = 'exon'
elif array_type == 'RNASeq':
explicit_data_type = 'junction'
else:
report_single_probeset_results = 'no'
### Obtain exon analysis defaults
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('exon',
species)
analysis_method, null, filter_probeset_types, null, null, alt_exon_fold_variable, null, null, null, null, null, null, null, calculate_normIntensity_p, null = alt_exon_defaults
filter_probesets_by = filter_probeset_types
if additional_algorithm == 'splicing-index' or additional_algorithm == 'FIRMA':
analysis_method = additional_algorithm
#print [analysis_method], [filter_probeset_types], [p_threshold], [alt_exon_fold_variable]
try:
alt_exon_logfold_cutoff = math.log(float(additional_score), 2)
except Exception:
alt_exon_logfold_cutoff = 1
agglomerate_inclusion_probesets = 'no'
try:
summary_results_db, aspire_output_gene_list, number_events_analyzed = RunAltAnalyze()
exportSummaryResults(summary_results_db, analysis_method, aspire_output_list,
aspire_output_gene_list, annotate_db, 'exon', number_events_analyzed, root_dir)
if len(summary_data_db2) == 0: summary_data_db2 = summary_data_db; explicit_data_type = 'exon-only'
#universalPrintFunction(['Alternative Exon Results for Individual Probeset Analyses:'])
#for i in summary_data_db: universalPrintFunction([i+' '+ str(summary_data_db[i])])
except Exception:
print traceback.format_exc()
None
#"""
### Perform dPSI Analysis
try:
if 'counts.' in fl.CountsFile():
pass
else:
dir_list = read_directory(fl.RootDir() + 'ExpressionInput')
for file in dir_list:
if 'exp.' in file and 'steady-state' not in file:
fl.setExpFile(fl.RootDir() + 'ExpressionInput/' + file)
#print [fl.RootDir()+'ExpressionInput/'+file]
except Exception:
search_dir = fl.RootDir() + '/ExpressionInput'
files = unique.read_directory(fl.RootDir() + '/ExpressionInput')
for file in files:
if 'exp.' in file and 'steady-state.txt' not in file:
fl.setExpFile(search_dir + '/' + file)
try:
#"""
try:
graphic_links2, cluster_input_file = ExpressionBuilder.unbiasedComparisonSpliceProfiles(fl.RootDir(),
species,
array_type,
expFile=fl.CountsFile(),
min_events=0,
med_events=1)
except Exception:
pass
#"""
inputpsi = fl.RootDir() + 'AltResults/AlternativeOutput/' + species + '_' + array_type + '_top_alt_junctions-PSI-clust.txt'
### Calculate ANOVA p-value stats based on groups
matrix, compared_groups, original_data = statistics.matrixImport(inputpsi)
matrix_pvalues = statistics.runANOVA(inputpsi, matrix, compared_groups)
anovaFilteredDir = statistics.returnANOVAFiltered(inputpsi, original_data, matrix_pvalues)
graphic_link1 = ExpressionBuilder.exportHeatmap(anovaFilteredDir)
try:
summary_data_db2['QC'] += graphic_link1
except Exception:
summary_data_db2['QC'] = graphic_link1
except Exception:
print traceback.format_exc()
import RNASeq
try:
graphic_link = RNASeq.compareExonAndJunctionResults(species, array_type, summary_results_db, root_dir)
try:
summary_data_db2['QC'] += graphic_link
except Exception:
summary_data_db2['QC'] = graphic_link
except Exception:
print traceback.format_exc()
#"""
### Export the top 15 spliced genes
try:
altresult_dir = fl.RootDir() + '/AltResults/'
splicing_results_root = altresult_dir + '/Clustering/'
dir_list = read_directory(splicing_results_root)
gene_string = ''
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
### Lookup the raw expression dir
expression_results_folder = string.replace(altresult_dir, 'AltResults', 'ExpressionInput')
expression_dir = UI.getValidExpFile(expression_results_folder)
try:
altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception, e:
print traceback.format_exc()
for file in dir_list:
if 'AltExonConfirmed' in file:
gene_dir = splicing_results_root + '/' + file
genes = UI.importGeneList(gene_dir, limit=50) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
show_introns = False
analysisType = 'plot'
for file in dir_list:
if 'Combined-junction-exon-evidence' in file and 'top' not in file:
gene_dir = splicing_results_root + '/' + file
try:
isoform_dir = UI.exportJunctionList(gene_dir, limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
UI.altExonViewer(species, array_type, expression_dir, gene_string, show_introns, analysisType, None);
print 'completed'
UI.altExonViewer(species, array_type, altresult_dir, gene_string, show_introns, analysisType, None);
print 'completed'
except Exception:
print traceback.format_exc()
try:
top_PSI_junction = inputpsi[:-4] + '-ANOVA.txt'
isoform_dir2 = UI.exportJunctionList(top_PSI_junction, limit=50) ### list of gene IDs or symbols
except Exception:
print traceback.format_exc()
try:
analyzeBAMs = False
dir_list = unique.read_directory(fl.RootDir())
for file in dir_list:
if '.bam' in string.lower(file):
analyzeBAMs = True
if analyzeBAMs:
### Create sashimi plot index
import SashimiIndex
SashimiIndex.remoteIndexing(species, fl)
import SashimiPlot
print 'Exporting Sashimi Plots for the top-predicted splicing events... be patient'
try:
SashimiPlot.remoteSashimiPlot(species, fl, fl.RootDir(),
isoform_dir) ### assuming the bam files are in the root-dir
except Exception:
pass
print 'completed'
SashimiPlot.remoteSashimiPlot(species, fl, fl.RootDir(),
isoform_dir2) ### assuming the bam files are in the root-dir
print 'completed'
else:
print 'No BAM files present in the root directory... skipping SashimiPlot analysis...'
except Exception:
print traceback.format_exc()
try:
clearObjectsFromMemory(exon_db);
clearObjectsFromMemory(constitutive_probeset_db)
clearObjectsFromMemory(go_annotations);
clearObjectsFromMemory(original_microRNA_z_score_data)
clearObjectsFromMemory(last_exon_region_db)
"""
print 'local vars'
all = [var for var in locals() if (var[:2], var[-2:]) != ("__", "__")]
for var in all:
try:
if len(locals()[var])>500: print var, len(locals()[var])
except Exception: null=[]
"""
except Exception:
null = []
#print '!!!!!finished'
#returnLargeGlobalVars()
end_time = time.time();
time_diff = int(end_time - start_time)
universalPrintFunction(["Analyses finished in %d seconds" % time_diff])
#universalPrintFunction(["Hit Enter/Return to exit AltAnalyze"])
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset];
results_dir = filepath(fl.RootDir())
### Perform GO-Elite Analysis
if pathway_permutations != 'NA':
goelite_run = False
print '\nBeginning to run GO-Elite analysis on alternative exon results'
elite_input_dirs = ['AltExonConfirmed', 'AltExon', 'regulated', 'upregulated',
'downregulated'] ### Run GO-Elite multiple times to ensure heatmaps are useful and to better organize results
for elite_dir in elite_input_dirs:
file_dirs = results_dir + 'GO-Elite/' + elite_dir, results_dir + 'GO-Elite/denominator', results_dir + 'GO-Elite/' + elite_dir
input_dir = results_dir + 'GO-Elite/' + elite_dir
try:
input_files = read_directory(input_dir) ### Are there any files to analyze?
except Exception:
input_files = []
if len(input_files) > 0:
variables = species, mod, pathway_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, root
try:
GO_Elite.remoteAnalysis(variables, 'non-UI', Multi=mlp); goelite_run = True
except Exception, e:
print e
print "GO-Elite analysis failed"
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[0])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
try:
GO_Elite.moveMAPPFinderFiles(file_dirs[1])
except Exception:
print 'Input GO-Elite files could NOT be moved.'
if goelite_run == False:
print 'No GO-Elite input files to analyze (check your criterion).'
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
try:
if root != '' and root != None:
print "Analysis Complete\n";
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'AS', results_dir, dataset_name, 'specific', summary_data_db2)
except Exception:
print traceback.format_exc()
pass #print 'Failed to open GUI.'
skip_intro = 'yes'
if root != '' and root != None:
if pathway_permutations == 'NA' and run_from_scratch != 'Annotate External Results':
try:
UI.getUpdatedParameters(array_type, species, run_from_scratch, file_dirs)
except Exception:
pass
try:
AltAnalyzeSetup('no')
except Exception:
sys.exit()
def exportSummaryResults(summary_results_db, analysis_method, aspire_output_list, aspire_output_gene_list, annotate_db,
array_type, number_events_analyzed, root_dir):
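    ### Write the splicing summary and comparison files through ResultsExport_module; an UnboundLocalError indicates there were no results to summarize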
try:
ResultsExport_module.outputSummaryResults(summary_results_db, '', analysis_method, root_dir)
#ResultsExport_module.outputSummaryResults(summary_results_db2,'-uniprot_attributes',analysis_method)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_list, annotate_db, number_events_analyzed, 'no',
analysis_method, array_type, root_dir)
ResultsExport_module.compareAltAnalyzeResults(aspire_output_gene_list, annotate_db, '', 'yes', analysis_method,
array_type, root_dir)
except UnboundLocalError:
print "...No results to summarize" ###Occurs if there is a problem parsing these files
def checkGOEliteProbesets(fn, species):
### Get all probesets in GO-Elite files
mod_source = 'Ensembl' + '-' + 'Affymetrix'
import gene_associations
try:
ensembl_to_probeset_id = gene_associations.getGeneToUid(species, mod_source)
except Exception:
ensembl_to_probeset_id = {}
mod_source = 'EntrezGene' + '-' + 'Affymetrix'
try:
entrez_to_probeset_id = gene_associations.getGeneToUid(species, mod_source)
except Exception:
entrez_to_probeset_id = {}
probeset_db = {}
for gene in ensembl_to_probeset_id:
for probeset in ensembl_to_probeset_id[gene]: probeset_db[probeset] = []
for gene in entrez_to_probeset_id:
for probeset in entrez_to_probeset_id[gene]: probeset_db[probeset] = []
###Import an Affymetrix array annotation file (from http://www.affymetrix.com) and parse out annotations
csv_probesets = {};
x = 0;
y = 0
fn = filepath(fn);
status = 'no'
for line in open(fn, 'r').readlines():
probeset_data = string.replace(line, '\n', '') #remove endline
probeset_data = string.replace(probeset_data, '---', '')
affy_data = string.split(probeset_data[1:-1], '","')
if x == 0 and line[0] != '#':
x = 1;
affy_headers = affy_data
for header in affy_headers:
y = 0
while y < len(affy_headers):
if 'Probe Set ID' in affy_headers[y] or 'probeset_id' in affy_headers[y]: ps = y
y += 1
elif x == 1:
try:
probeset = affy_data[ps]; csv_probesets[probeset] = []
except Exception:
null = []
for probeset in csv_probesets:
if probeset in probeset_db: status = 'yes';break
return status
class SpeciesData:
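    ### Simple container for a species code, full species name, compatible ID systems and taxonomy ID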
def __init__(self, abrev, species, systems, taxid):
self._abrev = abrev;
self._species = species;
self._systems = systems;
self._taxid = taxid
def SpeciesCode(self): return self._abrev
def SpeciesName(self): return self._species
def Systems(self): return self._systems
def TaxID(self): return self._taxid
    def __repr__(self): return self.SpeciesCode() + '|' + self.SpeciesName()
def getSpeciesInfo():
### Used by AltAnalyze
    species_codes = UI.importSpeciesInfo() ### assumes importSpeciesInfo returns the species_codes dictionary used below
    species_names = {}
for species_full in species_codes:
sc = species_codes[species_full];
abrev = sc.SpeciesCode()
species_names[abrev] = species_full
return species_codes, species_names
def importGOEliteSpeciesInfo():
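    ### Parse Config/goelite_species.txt (skipping the header row) into SpeciesData objects keyed by species name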
filename = 'Config/goelite_species.txt';
x = 0
fn = filepath(filename);
species_codes = {}
for line in open(fn, 'rU').readlines():
data = cleanUpLine(line)
abrev, species, taxid, compatible_mods = string.split(data, '\t')
if x == 0:
x = 1
else:
compatible_mods = string.split(compatible_mods, '|')
sd = SpeciesData(abrev, species, compatible_mods, taxid)
species_codes[species] = sd
return species_codes
def exportGOEliteSpeciesInfo(species_codes):
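    ### Rewrite Config/goelite_species.txt from the supplied species_codes, excluding 'other' and aggregate ('all-') entries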
fn = filepath('Config/goelite_species.txt');
data = open(fn, 'w');
x = 0
header = string.join(['species_code', 'species_name', 'tax_id', 'compatible_algorithms'], '\t') + '\n'
data.write(header)
for species in species_codes:
if 'other' not in species and 'all-' not in species:
sd = species_codes[species]
mods = string.join(sd.Systems(), '|')
values = [sd.SpeciesCode(), sd.SpeciesName(), sd.TaxID(), mods]
values = string.join(values, '\t') + '\n'
data.write(values)
data.close()
def TimeStamp():
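    ### Return the current date as a zero-padded YYYYMMDD string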
time_stamp = time.localtime()
year = str(time_stamp[0]);
month = str(time_stamp[1]);
day = str(time_stamp[2])
if len(month) < 2: month = '0' + month
if len(day) < 2: day = '0' + day
return year + month + day
def verifyFile(filename):
status = 'not found'
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines(): status = 'found';break
except Exception:
status = 'not found'
return status
def verifyFileLength(filename):
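    ### Count lines in a file, stopping after the first 10; returns 0 if the file cannot be read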
count = 0
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
count += 1
if count > 9: break
except Exception:
null = []
return count
def verifyGroupFileFormat(filename):
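    ### A groups file is considered valid if at least one line contains exactly three tab-delimited fields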
correct_format = False
try:
fn = filepath(filename)
for line in open(fn, 'rU').xreadlines():
data = cleanUpLine(line)
if len(string.split(data, '\t')) == 3:
correct_format = True
break
except Exception:
correct_format = False
return correct_format
def displayHelp():
fn = filepath('Documentation/commandline.txt')
print '\n################################################\nAltAnalyze Command-Line Help'
for line in open(fn, 'rU').readlines():
print cleanUpLine(line)
print '\n################################################ - END HELP'
sys.exit()
def searchDirectory(directory, var):
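    ### Return the path (relative to the current gene database version directory) of the first file whose name contains var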
directory = unique.filepath(directory)
files = unique.read_directory(directory)
version = unique.getCurrentGeneDatabaseVersion()
for file in files:
if var in file:
location = string.split(directory + '/' + file, version)[1][1:]
            return [location]
###### Command Line Functions (AKA Headless Mode) ######
def commandLineRun():
print 'Running commandline options'
import getopt
#/hd3/home/nsalomonis/normalization/mir1 - boxer
#python AltAnalyze.py --species Mm --arraytype "3'array" --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Hs --arraytype "3'array" --FEdir "C:/FEfiles" --output "C:/FEfiles" --channel_to_extract "green/red ratio" --expname cancer --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --celdir "C:/CEL" --output "C:/CEL" --expname miR1_column
#open ./AltAnalyze.app --celdir "/Users/nsalomonis/Desktop" --output "/Users/nsalomonis/Desktop" --expname test
#python AltAnalyze.py --species Mm --arraytype "3'array" --expdir "C:/CEL/ExpressionInput/exp.miR1_column.txt" --output "C:/CEL" --runGOElite yes --GEelitepval 0.01
#python AltAnalyze.py --species Mm --platform RNASeq --bedDir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --groupdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/groups.test.txt" --compdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/ExpressionInput/comps.test.txt" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles" --expname "test"
#python AltAnalyze.py --species Mm --platform RNASeq --filterdir "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles/" --output "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/BedFiles"
#python AltAnalyze.py --expdir "/Users/nsalomonis/Desktop/Nathan/ExpressionInput/exp.test.txt" --exonMapFile "/Users/nsalomonis/Desktop/Nathan/hgu133_probe.txt" --species Hs --platform "3'array" --output "/Users/nsalomonis/Desktop/Nathan"
global apt_location;
global root_dir;
global probability_statistic;
global log_file;
global summary_data_db;
summary_data_db = {}
###required
marker_finder = 'no'
manufacturer = 'Affymetrix'
constitutive_source = 'Ensembl'
ensembl_version = 'current'
species_code = None
species = None
main_input_folder = None
output_dir = None
array_type = None
input_annotation_file = None
groups_file = None
comps_file = None
input_cdf_file = None
exp_name = None
run_GOElite = 'yes'
visualize_qc_results = 'yes'
run_lineage_profiler = 'yes'
input_exp_file = ''
cel_file_dir = ''
input_stats_file = ''
input_filtered_dir = ''
external_annotation_dir = ''
xhyb_remove = 'no'
update_method = []
update_dbs = 'no'
analyze_all_conditions = 'no'
return_all = 'no'
additional_array_types = []
remove_intronic_junctions = 'no'
ignore_built_species = 'no'
build_exon_bedfile = 'no'
compendiumType = 'protein_coding'
probability_statistic = 'unpaired t-test'
specific_array_type = None
additional_resources = [None]
wpid = None
mod = 'Ensembl'
transpose = False
input_file_dir = None
denom_file_dir = None
image_export = []
    selected_species = ['Hs', 'Mm', 'Rn'] ### Species for which additional array types are currently supported
selected_platforms = ['AltMouse', 'exon', 'gene', 'junction']
returnPathways = 'no'
compendiumPlatform = 'gene'
exonMapFile = None
    platformType = None ### This option is used to store the original platform type
perform_alt_analysis = 'no'
mappedExonAnalysis = False ### Map the original IDs to the RNA-Seq exon database (when True)
microRNA_prediction_method = None
pipelineAnalysis = True
OntologyID = ''
PathwaySelection = ''
GeneSetSelection = ''
interactionDirs = []
inputType = 'ID list'
Genes = ''
degrees = 'direct'
includeExpIDs = True
update_interactions = False
data_type = 'raw expression'
batch_effects = 'no'
channel_to_extract = None
normalization = False
justShowTheseIDs = ''
display = False
accessoryAnalysis = ''
modelSize = None
geneModel = False
run_from_scratch = None
systemToUse = None ### For other IDs
custom_reference = False
multiThreading = True
genesToReport = 60
correlateAll = True
expression_data_format = 'log'
runICGS = False
IDtype = None
runKallisto = False
original_arguments = sys.argv
arguments = []
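    ### Normalize arguments pasted from rich-text sources, e.g. “C:/CEL” (smart quotes) or –output (en dash), in the loop below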
for arg in original_arguments:
arg = string.replace(arg, '\xe2\x80\x9c', '') ### These are non-standard forward quotes
arg = string.replace(arg, '\xe2\x80\x9d', '') ### These are non-standard reverse quotes
arg = string.replace(arg, '\xe2\x80\x93', '-') ### These are non-standard dashes
arg = string.replace(arg, '\x96', '-') ### These are non-standard dashes
arg = string.replace(arg, '\x93', '') ### These are non-standard forward quotes
arg = string.replace(arg, '\x94', '') ### These are non-standard reverse quotes
arguments.append(arg)
print '\nArguments input:', arguments, '\n'
if '--help' in arguments[1:] or '--h' in arguments[1:]:
try:
displayHelp() ### Print out a help file and quit
except Exception:
print 'See: http://www.altanalyze.org for documentation and command-line help';sys.exit()
if 'AltAnalyze' in arguments[1]:
        arguments = arguments[1:] ### Occurs on Ubuntu when the location of AltAnalyze is added to sys.argv (exclude it since no argument is provided for this variable)
try:
options, remainder = getopt.getopt(arguments[1:], '', ['species=', 'mod=', 'elitepval=', 'elitepermut=',
'method=', 'zscore=', 'pval=', 'num=',
'runGOElite=', 'denom=', 'output=', 'arraytype=',
'celdir=', 'expdir=', 'output=', 'statdir=',
'filterdir=', 'cdfdir=', 'csvdir=', 'expname=',
'dabgp=', 'rawexp=', 'avgallss=', 'logexp=',
'inclraw=', 'runalt=', 'altmethod=', 'altp=',
'probetype=', 'altscore=', 'GEcutoff=',
'exportnormexp=', 'calcNIp=', 'runMiDAS=',
'GEcutoff=', 'GEelitepval=', 'mirmethod=', 'ASfilter=',
'vendor=', 'GEelitefold=', 'update=', 'version=',
'analyzeAllGroups=', 'GEeliteptype=', 'force=',
'resources_to_analyze=', 'dataToAnalyze=', 'returnAll=',
'groupdir=', 'compdir=', 'annotatedir=',
'additionalScore=',
'additionalAlgorithm=', 'noxhyb=', 'platform=',
'bedDir=',
'altpermutep=', 'altpermute=',
'removeIntronOnlyJunctions=',
'normCounts=', 'buildExonExportFile=', 'groupStat=',
'compendiumPlatform=', 'rpkm=', 'exonExp=',
'specificArray=',
'ignoreBuiltSpecies=', 'ORAstat=', 'outputQCPlots=',
'runLineageProfiler=', 'input=', 'image=', 'wpid=',
'additional=', 'row_method=', 'column_method=',
'row_metric=', 'column_metric=', 'color_gradient=',
'transpose=', 'returnPathways=', 'compendiumType=',
'exonMapFile=', 'geneExp=', 'labels=', 'contrast=',
'plotType=', 'geneRPKM=', 'exonRPKM=',
'runMarkerFinder=',
'update_interactions=', 'includeExpIDs=', 'degrees=',
'genes=', 'inputType=', 'interactionDirs=',
'GeneSetSelection=',
'PathwaySelection=', 'OntologyID=', 'dataType=',
'combat=',
'channelToExtract=', 'showIntrons=', 'display=', 'join=',
'uniqueOnly=', 'accessoryAnalysis=', 'inputIDType=',
'outputIDType=',
'FEdir=', 'channelToExtract=', 'AltResultsDir=',
'geneFileDir=',
'AltResultsDir=', 'modelSize=', 'geneModel=',
'reference=',
'multiThreading=', 'multiProcessing=', 'genesToReport=',
'correlateAll=', 'normalization=', 'justShowTheseIDs=',
'direction=', 'analysisType=', 'algorithm=', 'rho=',
'clusterGOElite=', 'geneSetName=', 'runICGS=', 'IDtype=',
'CountsCutoff=', 'FoldDiff=', 'SamplesDiffering=',
                                                             'removeOutliers=', 'featurestoEvaluate=', 'restrictBy=',
'ExpressionCutoff=',
'excludeCellCycle=', 'runKallisto=', 'fastq_dir=',
'FDR='])
except Exception:
print traceback.format_exc()
print "There is an error in the supplied command-line arguments (each flag requires an argument)";
sys.exit()
for opt, arg in options:
#print [opt, arg]
if opt == '--species':
species = arg
elif opt == '--arraytype':
if array_type != None:
additional_array_types.append(arg)
else:
array_type = arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--exonMapFile':
perform_alt_analysis = 'yes' ### Perform alternative exon analysis
exonMapFile = arg
elif opt == '--specificArray':
specific_array_type = arg ### e.g., hGlue
elif opt == '--celdir':
cel_file_dir = arg
elif opt == '--bedDir':
cel_file_dir = arg
elif opt == '--FEdir':
cel_file_dir = arg
elif opt == '--expdir':
input_exp_file = arg
elif opt == '--statdir':
input_stats_file = arg
elif opt == '--filterdir':
input_filtered_dir = arg
elif opt == '--groupdir':
groups_file = arg
elif opt == '--compdir':
comps_file = arg
elif opt == '--cdfdir':
input_cdf_file = arg
elif opt == '--csvdir':
input_annotation_file = arg
elif opt == '--expname':
exp_name = arg
elif opt == '--output':
output_dir = arg
elif opt == '--vendor':
manufacturer = arg
elif opt == '--runICGS':
runICGS = True
elif opt == '--IDtype':
IDtype = arg
elif opt == '--ignoreBuiltSpecies':
ignore_built_species = arg
elif opt == '--platform':
if array_type != None:
additional_array_types.append(arg)
else:
array_type = arg; platform = array_type
if specific_array_type == None: specific_array_type = platform
elif opt == '--update':
update_dbs = 'yes'; update_method.append(arg)
elif opt == '--version':
ensembl_version = arg
elif opt == '--compendiumPlatform':
compendiumPlatform = arg ### platform for which the LineageProfiler compendium is built on
elif opt == '--force':
force = arg
elif opt == '--input':
input_file_dir = arg; pipelineAnalysis = False ### If this option is entered, only perform the indicated analysis
elif opt == '--image':
image_export.append(arg)
elif opt == '--wpid':
wpid = arg
elif opt == '--mod':
mod = arg
elif opt == '--runKallisto':
if arg == 'yes' or string.lower(arg) == 'true':
runKallisto = True
elif opt == '--fastq_dir':
input_fastq_dir = arg
elif opt == '--additional':
if additional_resources[0] == None:
additional_resources = []
additional_resources.append(arg)
else:
additional_resources.append(arg)
elif opt == '--transpose':
if arg == 'True': transpose = True
elif opt == '--runLineageProfiler': ###Variable declared here and later (independent analysis here or pipelined with other analyses later)
run_lineage_profiler = arg
elif opt == '--compendiumType': ### protein-coding, ncRNA, or exon
compendiumType = arg
elif opt == '--denom':
denom_file_dir = arg ### Indicates that GO-Elite is run independent from AltAnalyze itself
elif opt == '--accessoryAnalysis':
accessoryAnalysis = arg
elif opt == '--channelToExtract':
channel_to_extract = arg
elif opt == '--genesToReport':
genesToReport = int(arg)
elif opt == '--correlateAll':
correlateAll = True
elif opt == '--direction':
direction = arg
elif opt == '--logexp':
expression_data_format = arg
elif opt == '--geneRPKM':
rpkm_threshold = arg
elif opt == '--multiThreading' or opt == '--multiProcessing':
multiThreading = arg
if multiThreading == 'yes':
multiThreading = True
            elif 'rue' in multiThreading: ### matches 'True' or 'true'
multiThreading = True
else:
multiThreading = False
if 'other' in manufacturer or 'Other' in manufacturer:
### For other IDs
systemToUse = array_type
if array_type == None:
            print 'Please indicate an ID type as --platform when setting vendor equal to "Other IDs"';
sys.exit()
array_type = "3'array"
if array_type == 'RNASeq': manufacturer = array_type
if platformType == None: platformType = array_type
if perform_alt_analysis == 'yes':
if platform == "3'array":
mappedExonAnalysis = True
cel_file_dir = input_exp_file
exp_name = export.findFilename(input_exp_file)
exp_name = string.replace(exp_name, '.txt', '')
exp_name = string.replace(exp_name, 'exp.', '')
input_exp_file = ''
        ### To perform alternative exon analyses for platforms without a dedicated database, appropriate mapping info or array-type data must be provided
### (will need to perform downstream testing for unsupported Affymetrix exon, gene and junction arrays)
if exonMapFile == None and specific_array_type == None and cel_file_dir == '':
print_out = "\nUnable to run!!! Please designate either a specific platfrom (e.g., --specificArray hgU133_2), select CEL files, or an "
print_out += "exon-level mapping file location (--exonMapFile C:/mapping.txt) to perform alternative exon analyses for this platform."
### Will need to check here to see if the platform is supported (local or online files) OR wait until an error is encountered later
######## Perform analyses independent from AltAnalyze database centric analyses that require additional parameters
if len(image_export) > 0 or len(accessoryAnalysis) > 0 or runICGS:
if runICGS:
#python AltAnalyze.py --runICGS yes --expdir "/Users/saljh8/Desktop/demo/Myoblast/ExpressionInput/exp.myoblast.txt" --platform "3'array" --species Hs --GeneSetSelection BioMarkers --PathwaySelection Heart --column_method hopach --rho 0.4 --ExpressionCutoff 200 --justShowTheseIDs "NKX2-5 T TBX5" --FoldDiff 10 --SamplesDiffering 3 --excludeCellCycle conservative
try:
species = species
            except Exception:
                print 'Please designate a species before continuing (e.g., --species Hs)'
try:
array_type = array_type
            except Exception:
                print 'Please designate a platform before continuing (e.g., --platform RNASeq)'
if len(cel_file_dir) > 0:
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(values, 'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
else:
if len(input_exp_file) > 0:
pass
else:
                    print 'Please indicate a source folder or expression file (e.g., --expdir /dataset/singleCells.txt)'
if array_type == 'Other' or 'Other' in array_type:
                if ':' in array_type:
                    array_type, IDtype = string.split(array_type, ':')
                array_type = "3'array"
if IDtype == None: IDtype = manufacturer
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'yellow_black_blue'
contrast = 3
vendor = manufacturer
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
excludeCellCycle = True
rho_cutoff = 0.4
restrictBy = 'protein_coding'
featurestoEvaluate = 'Genes'
ExpressionCutoff = 1
CountsCutoff = 1
FoldDiff = 2
SamplesDiffering = 3
JustShowTheseIDs = ''
removeOutliers = False
PathwaySelection = []
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric':
row_metric = arg
elif opt == '--column_metric':
column_metric = arg
elif opt == '--color_gradient':
color_gradient = arg
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection.append(arg)
elif opt == '--genes':
GeneSelection = arg
elif opt == '--ExpressionCutoff':
ExpressionCutoff = arg
elif opt == '--normalization':
normalization = arg
elif opt == '--justShowTheseIDs':
justShowTheseIDs = arg
elif opt == '--rho':
rho_cutoff = float(arg)
elif opt == '--clusterGOElite':
clusterGOElite = float(arg)
elif opt == '--CountsCutoff':
CountsCutoff = int(float(arg))
elif opt == '--FoldDiff':
FoldDiff = int(float(arg))
elif opt == '--SamplesDiffering':
SamplesDiffering = int(float(arg))
elif opt == '--removeOutliers':
removeOutliers = arg
elif opt == '--featurestoEvaluate':
featurestoEvaluate = arg
elif opt == '--restrictBy':
restrictBy = arg
elif opt == '--excludeCellCycle':
excludeCellCycle = arg
if excludeCellCycle == 'False' or excludeCellCycle == 'no':
excludeCellCycle = False
elif excludeCellCycle == 'True' or excludeCellCycle == 'yes' or excludeCellCycle == 'conservative':
excludeCellCycle = True
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float';sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(PathwaySelection) == 0: PathwaySelection = ''
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setJustShowTheseIDs(JustShowTheseIDs)
gsp.setNormalize('median')
gsp.setSampleDiscoveryParameters(ExpressionCutoff, CountsCutoff, FoldDiff, SamplesDiffering,
removeOutliers, featurestoEvaluate, restrictBy, excludeCellCycle,
column_metric, column_method, rho_cutoff)
import RNASeq
mlp_instance = mlp
if cel_file_dir != '':
expFile = cel_file_dir + '/ExpressionInput/' + 'exp.' + exp_name + '.txt'
elif input_exp_file != '':
if 'ExpressionInput' in input_exp_file:
expFile = input_exp_file
else:
### Copy over expression file to ExpressionInput
expdir2 = string.replace(input_exp_file, 'exp.', '')
root_dir = export.findParentDir(expFile)
expFile = root_dir + '/ExpressionInput/exp.' + export.findFilename(expdir2)
export.copyFile(input_exp_file, expFile)
global log_file
root_dir = export.findParentDir(expFile)
root_dir = string.replace(root_dir, '/ExpressionInput', '')
time_stamp = timestamp()
log_file = filepath(root_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
sys.stdout = Logger('')
count = verifyFileLength(expFile[:-4] + '-steady-state.txt')
if count > 1:
expFile = expFile[:-4] + '-steady-state.txt'
elif array_type == 'RNASeq':
                ### Indicates that the steady-state file doesn't exist. The exp. file may exist but could be junction-only, so re-build from BED files here
values = species, exp_file_location_db, dataset, mlp_instance
StatusWindow(values, 'preProcessRNASeq') ### proceed to run the full discovery analysis here!!!
expFile = expFile[:-4] + '-steady-state.txt'
print [excludeCellCycle]
UI.RemotePredictSampleExpGroups(expFile, mlp_instance, gsp, (
species, array_type)) ### proceed to run the full discovery analysis here!!!
sys.exit()
if 'WikiPathways' in image_export:
#python AltAnalyze.py --input /Users/test/input/criterion1.txt --image WikiPathways --mod Ensembl --species Hs --wpid WP536
if wpid == None:
print 'Please provide a valid WikiPathways ID (e.g., WP1234)';
sys.exit()
if species == None:
print 'Please provide a valid species ID for an installed database (to install: --update Official --species Hs --version EnsMart62Plus)';
sys.exit()
if input_file_dir == None:
            print 'Please provide a valid file location for your input IDs (also needs to include system code and value column)';
sys.exit()
import WikiPathways_webservice
try:
print 'Attempting to output a WikiPathways colored image from user data'
print 'mod:', mod
print 'species_code:', species
print 'wpid:', wpid
print 'input GO-Elite ID file:', input_file_dir
graphic_link = WikiPathways_webservice.visualizePathwayAssociations(input_file_dir, species, mod, wpid)
except Exception, e:
if 'force_no_matching_error' in traceback.format_exc():
print '\nUnable to run!!! None of the input IDs mapped to this pathway\n'
elif 'IndexError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not have at least 3 columns, with the second column being system code\n'
elif 'ValueError' in traceback.format_exc():
print '\nUnable to run!!! Input ID file error. Please check that you do not have extra rows with no data\n'
elif 'source_data' in traceback.format_exc():
print '\nUnable to run!!! Input ID file does not contain a valid system code\n'
elif 'goelite' in traceback.format_exc():
print '\nUnable to run!!! A valid species database needs to first be installed. For example, run:'
print 'python AltAnalyze.py --update Official --species Hs --version EnsMart65\n'
else:
print traceback.format_exc()
print '\nError generating the pathway "%s"' % wpid, '\n'
        try:
            print 'Finished exporting visualized pathway to:', graphic_link['WP'], '\n'
        except Exception:
            pass
        sys.exit()
if 'MergeFiles' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis MergeFiles --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\tables"
files_to_merge = []
join_option = 'Intersection'
uniqueOnly = False
        for opt, arg in options: ### Accept user input for these file-merging options
if opt == '--input': files_to_merge.append(arg)
if opt == '--join': join_option = arg
            if opt == '--uniqueOnly': uniqueOnly = arg
if len(files_to_merge) < 2:
print 'Please designate two or more files to merge (--input)';
sys.exit()
UI.MergeFiles(files_to_merge, join_option, uniqueOnly, output_dir, None)
sys.exit()
if 'IDTranslation' in accessoryAnalysis:
#python AltAnalyze.py --accessoryAnalysis IDTranslation --inputIDType Symbol --outputIDType RefSeq --input "C:\file1.txt" --species Hs
inputIDType = None
outputIDType = None
        for opt, arg in options: ### Accept user input for these ID translation options
if opt == '--inputIDType': inputIDType = arg
if opt == '--outputIDType': outputIDType = arg
if inputIDType == None or outputIDType == None:
            print 'Please designate an input ID type and an output ID type (--inputIDType Ensembl --outputIDType Symbol)';
sys.exit()
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
UI.IDconverter(input_file_dir, species, inputIDType, outputIDType, None)
sys.exit()
if 'hierarchical' in image_export:
#python AltAnalyze.py --input "/Users/test/pluri.txt" --image hierarchical --row_method average --column_method single --row_metric cosine --column_metric euclidean --color_gradient red_white_blue --transpose False --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways --species Hs --platform exon --display false
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';
sys.exit()
row_method = 'weighted'
column_method = 'average'
row_metric = 'cosine'
column_metric = 'cosine'
color_gradient = 'red_black_sky'
contrast = 2.5
vendor = 'Affymetrix'
GeneSelection = ''
PathwaySelection = ''
GeneSetSelection = 'None Selected'
rho = None
for opt, arg in options: ### Accept user input for these hierarchical clustering variables
if opt == '--row_method':
row_method = arg
if row_method == 'None': row_method = None
elif opt == '--column_method':
column_method = arg
if column_method == 'None': column_method = None
elif opt == '--row_metric':
row_metric = arg
elif opt == '--column_metric':
column_metric = arg
elif opt == '--color_gradient':
color_gradient = arg
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection = arg
elif opt == '--genes':
GeneSelection = arg
elif opt == '--OntologyID':
OntologyID = arg
elif opt == '--normalization':
normalization = arg
elif opt == '--justShowTheseIDs':
justShowTheseIDs = arg
elif opt == '--rho':
rho = arg
elif opt == '--clusterGOElite':
clusterGOElite = arg
elif opt == '--contrast':
try:
contrast = float(arg)
except Exception:
print '--contrast not a valid float';sys.exit()
elif opt == '--vendor':
vendor = arg
elif opt == '--display':
if arg == 'yes':
display = True
elif arg == 'True':
display = True
else:
display = False
if len(GeneSetSelection) > 0 or GeneSelection != '':
gsp = UI.GeneSelectionParameters(species, array_type, vendor)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
try:
gsp.setClusterGOElite(clusterGOElite)
except Exception:
pass
if rho != None:
try:
float(rho)
gsp.setRhoCutoff(rho)
except Exception:
print 'Must enter a valid Pearson correlation cutoff (float)'
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
if len(GeneSetSelection) > 0:
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
try:
files = unique.read_directory(input_file_dir + '/')
dir = input_file_dir
for file in files:
filename = dir + '/' + file
UI.createHeatMap(filename, row_method, row_metric, column_method, column_metric, color_gradient,
transpose, contrast, None, display=display)
except Exception:
UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient,
transpose, contrast, None, display=display)
#import clustering; clustering.outputClusters([input_file_dir],[])
sys.exit()
if 'PCA' in image_export:
#AltAnalyze.py --input "/Users/nsalomonis/Desktop/folds.txt" --image PCA --plotType 3D --display True --labels yes
#--algorithm "t-SNE"
        include_labels = 'yes'
        plotType = '2D'
        pca_algorithm = 'SVD'
        geneSetName = None
        zscore = True ### default assumed here so that --zscore is optional; overridden below when --zscore is supplied
        for opt, arg in options: ### Accept user input for these PCA options
if opt == '--labels':
include_labels = arg
if include_labels == 'True':
include_labels = 'yes'
if opt == '--plotType': plotType = arg
if opt == '--algorithm': pca_algorithm = arg
if opt == '--geneSetName': geneSetName = arg
if opt == '--zscore':
if arg == 'yes' or arg == 'True' or arg == 'true':
zscore = True
else:
zscore = False
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if input_file_dir == None:
print 'Please provide a valid file location for your input data matrix (must have an annotation row and an annotation column)';
sys.exit()
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None, plotType=plotType,
display=display, geneSetName=geneSetName, species=species, zscore=zscore)
sys.exit()
if 'VennDiagram' in image_export:
# AltAnalyze.py --image "VennDiagram" --input "C:\file1.txt" --input "C:\file2.txt" --output "C:\graphs"
files_to_merge = []
        for opt, arg in options: ### Accept user input for these Venn diagram options
if opt == '--input': files_to_merge.append(arg)
if opt == '--display':
if arg == 'yes' or arg == 'True' or arg == 'true':
display = True
if len(files_to_merge) < 2:
print 'Please designate two or more files to compare (--input)';
sys.exit()
UI.vennDiagram(files_to_merge, output_dir, None, display=display)
sys.exit()
if 'AltExonViewer' in image_export:
#python AltAnalyze.py --image AltExonViewer --AltResultsDir "C:\CP-hESC" --genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2" --species Hs --platform exon --dataType "splicing-index"
        genes = []
        show_introns = 'no'
        geneFileDir = ''
        analysisType = 'plot'
        altresult_dir = None ### initialized so the check below works when --AltResultsDir is not supplied
        for opt, arg in options: ### Accept user input for these AltExonViewer options
if opt == '--genes':
genes = arg
elif opt == '--dataType':
data_type = arg
elif opt == '--showIntrons':
show_introns = arg
elif opt == '--AltResultsDir':
altresult_dir = arg
elif opt == '--geneFileDir':
geneFileDir = arg
elif opt == '--analysisType':
analysisType = arg
if altresult_dir == None:
print 'Please include the location of the AltResults directory (--AltResultsDir)';
sys.exit()
if len(genes) == 0 and len(geneFileDir) == 0:
print "Please indicate the genes (--genes) or gene file location (--geneFileDir) for AltExonViewer";
sys.exit()
if species == None:
print "Please enter a valide species (--species)";
sys.exit()
if array_type == None:
print "Please enter a valide platform (--platform)";
sys.exit()
if 'AltResults' not in altresult_dir:
altresult_dir += '/AltResults/'
if 'Sashimi' in analysisType:
altresult_dir = string.split(altresult_dir, 'AltResults')[0]
genes = geneFileDir
geneFileDir = ''
elif 'raw' in data_type: ### Switch directories if expression
altanalyze_results_folder = string.replace(altresult_dir, 'AltResults', 'ExpressionInput')
altresult_dir = UI.getValidExpFile(altanalyze_results_folder)
if len(altresult_dir) == 0:
print 'No valid expression input file (e.g., exp.MyExperiment.txt) found in', altanalyze_results_folder;
sys.exit()
else:
altanalyze_results_folder = altresult_dir + '/RawSpliceData/' + species
try:
altresult_dir = UI.getValidSplicingScoreFile(altanalyze_results_folder)
except Exception, e:
print "No files found in: " + altanalyze_results_folder;
sys.exit()
if len(geneFileDir) > 0:
try:
genes = UI.importGeneList(geneFileDir) ### list of gene IDs or symbols
except Exception:
### Can occur if a directory of files is selected
try:
files = unique.read_directory(geneFileDir + '/')
gene_string = ''
for file in files:
if '.txt' in file:
filename = geneFileDir + '/' + file
genes = UI.importGeneList(filename) ### list of gene IDs or symbols
gene_string = gene_string + ',' + genes
print 'Imported genes from', file, '\n'
#print [altresult_dir];sys.exit()
UI.altExonViewer(species, platform, altresult_dir, gene_string, show_introns, analysisType,
False)
except Exception:
pass
sys.exit()
if len(genes) == 0:
print 'Please list one or more genes (--genes "ANXA7 FYN TCF3 NAV2 ETS2 MYLK ATP2A2")';
sys.exit()
try:
UI.altExonViewer(species, platform, altresult_dir, genes, show_introns, analysisType, False)
except Exception:
print traceback.format_exc()
sys.exit()
if 'network' in image_export:
#AltAnalyze.py --image network --species Hs --output "C:\GSE9440_RAW" --PathwaySelection Apoptosis:WP254 --GeneSetSelection WikiPathways
        for opt, arg in options: ### Accept user input for these network options
if opt == '--update_interactions':
update_interactions = arg
elif opt == '--includeExpIDs':
includeExpIDs = arg
elif opt == '--degrees':
degrees = arg
elif opt == '--genes':
Genes = arg
inputType = 'IDs'
elif opt == '--inputType':
inputType = arg
elif opt == '--interactionDirs':
interactionDirs.append(arg)
elif opt == '--GeneSetSelection':
GeneSetSelection = arg
elif opt == '--PathwaySelection':
PathwaySelection = arg
elif opt == '--OntologyID':
OntologyID = arg
elif opt == '--display':
display = arg
if update_interactions == 'yes':
update_interactions = True
else:
update_interactions = False
if input_file_dir == None:
pass
elif len(input_file_dir) == 0:
input_file_dir = None
if len(input_exp_file) == 0: input_exp_file = None
if len(interactionDirs) == 0: interactionDirs = ['WikiPathways']
if interactionDirs == ['all']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets', 'common-microRNATargets',
'all-microRNATargets', 'common-DrugBank', 'all-DrugBank']
if interactionDirs == ['main']:
interactionDirs = ['WikiPathways', 'KEGG', 'BioGRID', 'TFTargets']
if interactionDirs == ['confident']:
interactionDirs = ['WikiPathways', 'KEGG', 'TFTargets']
if len(Genes) == 0: Genes = None
if output_dir == None:
pass
elif len(output_dir) == 0:
output_dir = None
        if GeneSetSelection == 'None Selected': GeneSetSelection = None
if includeExpIDs == 'yes':
includeExpIDs = True
else:
includeExpIDs = False
gsp = UI.GeneSelectionParameters(species, array_type, manufacturer)
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(Genes)
gsp.setOntologyID(OntologyID)
gsp.setIncludeExpIDs(includeExpIDs)
root = ''
if species == None:
print 'Please designate a species (--species).';
sys.exit()
if output_dir == None:
            print 'Please designate an output directory (--output)';
sys.exit()
if input_file_dir != None:
if '.txt' in input_file_dir or '.sif' in input_file_dir:
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees, input_exp_file,
gsp, root)
else:
parent_dir = input_file_dir
dir_list = read_directory(parent_dir)
for file in dir_list:
input_file_dir = parent_dir + '/' + file
try:
UI.networkBuilder(input_file_dir, inputType, output_dir, interactionDirs, degrees,
input_exp_file, gsp, root)
except Exception:
print file, 'failed to produce network'
else:
UI.networkBuilder(None, inputType, output_dir, interactionDirs, degrees, input_exp_file, gsp, root)
sys.exit()
########## Begin database dependent AltAnalyze workflows
if ensembl_version != 'current' and 'markers' not in update_method:
dbversion = string.replace(ensembl_version, 'EnsMart', '')
UI.exportDBversion('EnsMart' + dbversion)
gene_database = unique.getCurrentGeneDatabaseVersion()
print 'Current database version:', gene_database
if array_type == None and update_dbs != 'yes' and denom_file_dir == None:
print "Please specify an array or data type (e.g., RNASeq, exon, gene, junction, AltMouse, 3'array).";
sys.exit()
if 'archive' in update_method:
###
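        ### Hypothetical usage (version number illustrative): python AltAnalyze.py --update archive --version 72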
print 'Archiving databases', ensembl_version
try:
            archive_dir = 'ArchiveDBs/EnsMart' + ensembl_version + '/archive'
            export.createDirPath(filepath(archive_dir))
except Exception:
null = [] ### directory already exists
dirs = unique.read_directory('/ArchiveDBs/EnsMart' + ensembl_version)
print len(dirs), dirs
import shutil
for species_dir in dirs:
try:
#print '/ArchiveDBs/EnsMart'+ensembl_version+'/'+species_dir+'/'+species_dir+'_RNASeq.zip'
src = filepath(
'ArchiveDBs/EnsMart' + ensembl_version + '/' + species_dir + '/' + species_dir + '_RNASeq.zip')
dstn = filepath('ArchiveDBs/EnsMart' + ensembl_version + '/archive/' + species_dir + '_RNASeq.zip')
#export.copyFile(src, dstn)
shutil.move(src, dstn)
try:
srcj = string.replace(src, 'RNASeq.', 'junction.');
dstnj = string.replace(dstn, 'RNASeq.', 'junction.')
shutil.move(srcj, dstnj)
except Exception:
null = []
try:
src = string.replace(src, '_RNASeq.', '.');
dstn = string.replace(dstn, '_RNASeq.', '.')
shutil.move(src, dstn)
except Exception:
null = []
except Exception:
null = []
sys.exit()
if update_dbs == 'yes' and 'Official' not in update_method:
if 'cleanup' in update_method:
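            ### Hypothetical usage (version illustrative): python AltAnalyze.py --update cleanup --version 72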
existing_species_dirs = unique.read_directory('/AltDatabase/ensembl')
print 'Deleting EnsemblSQL directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/ensembl/' + species + '/EnsemblSQL')
existing_species_dirs = unique.read_directory('/AltDatabase')
print 'Deleting SequenceData directory for all species, ensembl version', ensembl_version
for species in existing_species_dirs:
export.deleteFolder('AltDatabase/' + species + '/SequenceData')
print 'Finished...exiting'
sys.exit()
if 'package' not in update_method and 'markers' not in update_method:
### Example:
### python AltAnalyze.py --species all --arraytype all --update all --version 60
### tr -d \\r < AltAnalyze.py > AltAnalyze_new.py
### chmod +x AltAnalyze_new.py
### nohup ./AltAnalyze.py --update all --species Mm --arraytype gene --arraytype exon --version 60 2>&1 > nohup_v60_Mm.txt
if array_type == 'all' and (species == 'Mm' or species == 'all'):
array_type = ['AltMouse', 'exon', 'gene', 'junction', 'RNASeq']
elif array_type == 'all' and (species == 'Hs' or species == 'Rn'):
array_type = ['exon', 'gene', 'junction', 'RNASeq']
else:
array_type = [array_type] + additional_array_types
if species == 'all' and 'RNASeq' not in array_type: species = selected_species ### just analyze the species for which multiple platforms are supported
if species == 'selected':
species = selected_species ### just analyze the species for which multiple platforms are supported
elif species == 'all':
all_supported_names = {};
all_species_names = {}
species_names = UI.getSpeciesInfo()
for species in species_names: all_supported_names[species_names[species]] = species
import EnsemblSQL
child_dirs, ensembl_species, ensembl_versions = EnsemblSQL.getCurrentEnsemblSpecies(
'release-' + ensembl_version)
for ens_species in ensembl_species:
ens_species = string.replace(ens_species, '_', ' ')
if ens_species in all_supported_names:
all_species_names[all_supported_names[ens_species]] = []
del all_species_names['Hs']
del all_species_names['Mm']
del all_species_names['Rn']
"""
del all_species_names['Go']
del all_species_names['Bt']
del all_species_names['Sc']
del all_species_names['Ss']
del all_species_names['Pv']
del all_species_names['Pt']
del all_species_names['La']
del all_species_names['Tt']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Pb']
del all_species_names['Pc']
del all_species_names['Ec']
del all_species_names['Tb']
del all_species_names['Tg']
del all_species_names['Dn']
del all_species_names['Do']
del all_species_names['Tn']
del all_species_names['Dm']
del all_species_names['Oc']
del all_species_names['Og']
del all_species_names['Fc']
del all_species_names['Dr']
del all_species_names['Me']
del all_species_names['Cp']
del all_species_names['Tt']
del all_species_names['La']
del all_species_names['Tr']
del all_species_names['Ts']
del all_species_names['Et'] ### No alternative isoforms?
del all_species_names['Pc']
del all_species_names['Tb']
del all_species_names['Fc']
del all_species_names['Sc']
del all_species_names['Do']
del all_species_names['Dn']
del all_species_names['Og']
del all_species_names['Ga']
del all_species_names['Me']
del all_species_names['Ml']
del all_species_names['Mi']
del all_species_names['St']
del all_species_names['Sa']
del all_species_names['Cs']
del all_species_names['Vp']
del all_species_names['Ch']
del all_species_names['Ee']
del all_species_names['Ac']"""
sx = [];
all_species_names2 = [] ### Ensure that the core selected species are run first
for species in selected_species:
if species in all_species_names: sx.append(species)
for species in all_species_names:
if species not in selected_species: all_species_names2.append(species)
all_species_names = sx + all_species_names2
species = all_species_names
else:
species = [species]
update_uniprot = 'no';
update_ensembl = 'no';
update_probeset_to_ensembl = 'no';
update_domain = 'no';
update_miRs = 'no';
genomic_build = 'new';
update_miR_seq = 'yes'
if 'all' in update_method:
update_uniprot = 'yes';
update_ensembl = 'yes';
update_probeset_to_ensembl = 'yes';
update_domain = 'yes';
update_miRs = 'yes'
if 'UniProt' in update_method: update_uniprot = 'yes'
if 'Ensembl' in update_method: update_ensembl = 'yes'
if 'Probeset' in update_method or 'ExonAnnotations' in update_method: update_probeset_to_ensembl = 'yes'
if 'Domain' in update_method:
update_domain = 'yes'
try:
from Bio import Entrez #test this
except Exception:
                    print 'The dependent module Bio is not installed or not accessible through the default Python interpreter. Exiting AltAnalyze.'; sys.exit()
if 'miRBs' in update_method or 'miRBS' in update_method: update_miRs = 'yes'
if 'NewGenomeBuild' in update_method: genomic_build = 'new'
if 'current' in ensembl_version: print "Please specify an Ensembl version number (e.g., 60) before proceeding with the update.";sys.exit()
try:
force = force ### Variable is not declared otherwise
except Exception:
force = 'yes'; print 'force:', force
existing_species_dirs = {}
update_all = 'no' ### We don't pass this as yes, in order to skip certain steps when multiple array types are analyzed (others are specified above)
try:
print "Updating AltDatabase the following array_types", string.join(
array_type), "for the species", string.join(species)
except Exception:
print 'Please designate a valid platform/array_type (e.g., exon) and species code (e.g., Mm).'
for specific_species in species:
for platform_name in array_type:
if platform_name == 'AltMouse' and specific_species == 'Mm':
proceed = 'yes'
elif platform_name == 'exon' or platform_name == 'gene':
#### Check to see if the probeset.csv file is present
#try: probeset_transcript_file = ExonArrayEnsemblRules.getDirectoryFiles('/AltDatabase/'+specific_species+'/'+platform_name)
                    #except Exception: print "Affymetrix probeset.csv annotation file is not found. You must save this to",'/AltDatabase/'+specific_species+'/'+platform_name,'before updating (unzipped).'; sys.exit()
proceed = 'yes'
elif platform_name == 'junction' and (specific_species == 'Hs' or specific_species == 'Mm'):
proceed = 'yes'
elif platform_name == 'RNASeq':
proceed = 'yes'
else:
proceed = 'no'
if proceed == 'yes':
print "Analyzing", specific_species, platform_name
if (platform_name != array_type[0]) and len(species) == 1:
update_uniprot = 'no';
update_ensembl = 'no';
update_miR_seq = 'no' ### Don't need to do this twice in a row
print 'Skipping ensembl, uniprot and mir-sequence file import updates since already completed for this species', array_type, platform_name
if ignore_built_species == 'yes': ### Useful for when building all species for a new database build
existing_species_dirs = unique.read_directory(
'/AltDatabase/ensembl') ### call this here to update with every species - if running multiple instances
if specific_array_type != None and specific_array_type != platform_name: platform_name += '|' + specific_array_type ### For the hGlue vs. JAY arrays
if specific_species not in existing_species_dirs: ### Useful when running multiple instances of AltAnalyze to build all species
print 'update_ensembl', update_ensembl
print 'update_uniprot', update_uniprot
print 'update_probeset_to_ensembl', update_probeset_to_ensembl
print 'update_domain', update_domain
print 'update_miRs', update_miRs
update.executeParameters(specific_species, platform_name, force, genomic_build,
update_uniprot, update_ensembl, update_probeset_to_ensembl,
update_domain, update_miRs, update_all, update_miR_seq,
ensembl_version)
else:
print 'ignoring', specific_species
sys.exit()
if 'package' in update_method:
### Example: python AltAnalyze.py --update package --species all --platform all --version 65
if ensembl_version == 'current': print '\nPlease specify version of the database to package (e.g., --version 60).'; sys.exit()
ensembl_version = 'EnsMart' + ensembl_version
### Get all possible species
        species_names = UI.getSpeciesInfo()
        possible_species = species_names
possible_arrays = ['exon', 'gene', 'junction', 'AltMouse', 'RNASeq']
try:
if species == 'all':
possible_species = possible_species
elif species == 'selected':
possible_species = selected_species
else:
possible_species = [species]
except Exception:
species = possible_species
if array_type == None or array_type == 'all':
possible_arrays = possible_arrays
else:
possible_arrays = [array_type] + additional_array_types
species_to_package = {}
dirs = unique.read_directory('/AltDatabase/' + ensembl_version)
#print possible_arrays, possible_species; sys.exit()
for species_code in dirs:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/' + ensembl_version + '/' + species_code)
for arraytype in array_types:
if arraytype in possible_arrays:
if species_code in possible_species:
array_types = unique.read_directory('/AltDatabase/' + ensembl_version + '/' + species_code)
try:
species_to_package[species_code].append(arraytype)
except Exception:
species_to_package[species_code] = [arraytype]
species_to_package = eliminate_redundant_dict_values(species_to_package)
for species in species_to_package:
files_to_copy = [species + '_Ensembl_domain_aligning_probesets.txt']
files_to_copy += [species + '_Ensembl_indirect_domain_aligning_probesets.txt']
files_to_copy += [species + '_Ensembl_probesets.txt']
files_to_copy += [species + '_Ensembl_exons.txt']
#files_to_copy+=[species+'_Ensembl_junctions.txt']
files_to_copy += [species + '_exon_core.mps']
files_to_copy += [species + '_exon_extended.mps']
files_to_copy += [species + '_exon_full.mps']
files_to_copy += [species + '_gene_core.mps']
files_to_copy += [species + '_gene_extended.mps']
files_to_copy += [species + '_gene_full.mps']
files_to_copy += [species + '_gene-exon_probesets.txt']
files_to_copy += [species + '_probes_to_remove.txt']
files_to_copy += [species + '_probeset-probes.txt']
files_to_copy += [species + '_probeset_microRNAs_any.txt']
files_to_copy += [species + '_probeset_microRNAs_multiple.txt']
files_to_copy += ['probeset-domain-annotations-exoncomp.txt']
files_to_copy += ['probeset-protein-annotations-exoncomp.txt']
#files_to_copy+=['probeset-protein-dbase_exoncomp.txt']
files_to_copy += ['SEQUENCE-protein-dbase_exoncomp.txt']
files_to_copy += [species + '_Ensembl_junction_probesets.txt']
files_to_copy += [species + '_Ensembl_AltMouse_probesets.txt']
files_to_copy += [species + '_RNASeq-exon_probesets.txt']
files_to_copy += [species + '_junction-exon_probesets.txt']
files_to_copy += [species + '_junction_all.mps']
files_to_copy += [
'platform.txt'] ### Indicates the specific platform for an array type (e.g., HJAY for junction or hGlue for junction)
files_to_copy += [species + '_junction_comps_updated.txt']
files_to_copy += ['MASTER-probeset-transcript.txt']
files_to_copy += ['AltMouse-Ensembl.txt']
files_to_copy += ['AltMouse_junction-comparisons.txt']
files_to_copy += ['AltMouse_gene_annotations.txt']
files_to_copy += ['AltMouse_annotations.txt']
common_to_copy = ['uniprot/' + species + '/custom_annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl-annotations_simple.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl-annotations.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_microRNA-Ensembl.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_transcript-biotypes.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_transcript-annotations.txt']
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'Ensembl_Protein')
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'ProteinFeatures')
common_to_copy += searchDirectory("AltDatabase/ensembl/" + species + "/", 'ProteinCoordinates')
supported_arrays_present = 'no'
        for arraytype in selected_platforms:
            if arraytype in species_to_package[species]:
                supported_arrays_present = 'yes' ### Hence a non-RNASeq platform is present
if supported_arrays_present == 'yes':
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
export.copyFile(ir + file, er + file)
if 'RNASeq' in species_to_package[species]:
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_junction.txt']
common_to_copy += ['ensembl/' + species + '/' + species + '_Ensembl_exon.txt']
for file in common_to_copy:
ir = 'AltDatabase/' + ensembl_version + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/'
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' ### This allows us to build the package archive in a separate directory for selected species, so separate but overlapping content can be packaged
export.copyFile(ir + file, er + file)
for array_type in species_to_package[species]:
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + '/'
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'junction':
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + '/'
if array_type == 'RNASeq' and species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + '/'
            for file in files_to_copy:
                export_path = [] ### initialize per file, matching the later copy loops, so a stale path is not re-used
                if array_type == 'RNASeq': file = string.replace(file, '_updated.txt', '.txt')
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if len(export_path) > 0:
if 'AltMouse' in export_path or 'probes_' in export_path:
export.cleanFile(export_path)
if array_type == 'junction':
subdir = '/exon/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
export_path = []
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if array_type == 'RNASeq':
subdir = '/junction/'
ir = 'AltDatabase/' + ensembl_version + '/' + species + '/' + array_type + subdir
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version + '/' + species + '/' + array_type + subdir
if species in selected_species:
er = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version + '/' + species + '/' + array_type + subdir
for file in files_to_copy:
if 'SEQUENCE-protein-dbase' not in file and 'domain_aligning' not in file: ### This data is now combined into the main file
export_path = []
filt_file = string.replace(file, '.txt', '-filtered.txt')
try:
export.copyFile(ir + filt_file, er + filt_file); export_path = er + filt_file
except Exception:
try:
export.copyFile(ir + file, er + file); export_path = er + file
except Exception:
null = [] ### File not found in directory
if 'RNASeq' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '_RNASeq.zip'
if species in selected_species:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/RNASeq/' + ensembl_version
update.zipDirectory(src);
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if supported_arrays_present == 'yes':
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + ensembl_version
dst = 'ArchiveDBs/' + ensembl_version + '/' + species + '/' + species + '.zip'
update.zipDirectory(src);
print 'Zipping', species, array_type, dst
os.rename(src + '.zip', dst)
if 'junction' in species_to_package[species]:
src = 'ArchiveDBs/' + ensembl_version + '/' + species + '/junction'
dst = string.replace(src, 'junction', species + '_junction.zip')
update.zipDirectory(src);
print 'Zipping', species + '_junction'
os.rename(src + '.zip', dst)
sys.exit()
if 'markers' in update_method:
if species == None or platform == None:
print "WARNING! A species and platform (e.g., exon, junction, 3'array or RNASeq) must be defined to identify markers.";
sys.exit()
elif input_exp_file == '':
print "WARNING! A input expression file must be supplied (e.g., ExpressionOutput/DATASET.YourExperimentName.txt) for this analysis.";
sys.exit()
else:
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/ExpressionInput/exp.meta.txt"
#python AltAnalyze.py --update markers --platform gene --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/Mm_Gene-TissueAtlas/AltResults/RawSpliceData/Mm/splicing-index/meta.txt"
#python AltAnalyze.py --update markers --platform "3'array" --expdir "/home/socr/c/users2/salomoni/other/boxer/normalization/U133/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --compendiumType ncRNA --platform "exon" --expdir "/home/socr/c/users2/salomoni/conklin/nsalomonis/normalization/Hs_Exon-TissueAtlas/ExpressionOutput/DATASET-meta.txt"
#python AltAnalyze.py --update markers --platform RNASeq --species Mm --geneRPKM 1 --expdir /Users/saljh8/Desktop/Grimes/MergedRSEM/DN-Analysis/ExpressionInput/exp.DN.txt --genesToReport 200
"""The markerFinder module:
1) takes an input ExpressionOutput file (DATASET.YourExperimentName.txt)
2) extracts group average expression and saves to AVERAGE.YourExperimentName.txt to the ExpressionOutput directory
3) re-imports AVERAGE.YourExperimentName.txt
4) correlates the average expression of each gene to an idealized profile to derive a Pearson correlation coefficient
5) identifies optimal markers based on these correlations for each tissue
6) exports an expression file with just these marker genes and tissues
            This module can perform these analyses on protein coding or ncRNAs and can segregate the cell/tissue groups into clusters
when a group notation is present in the sample name (e.g., 0~Heart, 0~Brain, 1~Stem Cell)"""
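            ### Illustrative (hypothetical) sample headers using the group notation described above:
            ### UID	0~Heart.rep1	0~Heart.rep2	1~Stem Cell.rep1	1~Stem Cell.rep2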
import markerFinder
if 'AltResults' in input_exp_file and 'Clustering' not in input_exp_file:
                ### This applies to a file composed of exon-level normalized intensities (calculate average group expression)
markerFinder.getAverageExonExpression(species, platform, input_exp_file)
if 'Raw' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'Raw', 'AVERAGE')
else:
group_exp_file = string.replace(input_exp_file, 'FullDatasets', 'AVERAGE-FullDatasets')
altexon_correlation_file = markerFinder.analyzeData(group_exp_file, species, platform, compendiumType,
geneToReport=genesToReport,
correlateAll=correlateAll, AdditionalParameters=fl)
markerFinder.getExprValsForNICorrelations(platform, altexon_correlation_file, group_exp_file)
else:
                ### This applies to an ExpressionOutput DATASET file composed of gene expression values (averages already present)
import collections
try:
test_ordereddict = collections.OrderedDict()
except Exception:
try:
import ordereddict
except Exception:
### This is needed to re-order the average file so that the groups are sequentially ordered when analyzing clustered groups (0~)
                        print 'Warning!!!! To run markerFinder correctly, call Python version 2.7x or greater (Python 3.x not supported)'
                        print 'Requires collections.OrderedDict (or the standalone ordereddict library). To call 2.7: /usr/bin/python2.7'
sys.exit()
try:
output_dir = markerFinder.getAverageExpressionValues(input_exp_file,
platform) ### Either way, make an average annotated file from the DATASET file
if 'DATASET' in input_exp_file:
group_exp_file = string.replace(input_exp_file, 'DATASET', 'AVERAGE')
else:
group_exp_file = (input_exp_file, output_dir) ### still analyze the primary sample
except Exception:
### Work around when performing this analysis on an alternative exon input cluster file
group_exp_file = input_exp_file
fl = UI.ExpressionFileLocationData(input_exp_file, '', '', '');
fl.setOutputDir(export.findParentDir(export.findParentDir(input_exp_file)[:-1]))
if platform == 'RNASeq':
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = 1.0
fl.setRPKMThreshold(rpkm_threshold)
try:
correlationDirection = direction ### correlate to a positive or inverse negative in silico artificial pattern
except Exception:
correlationDirection = 'up'
fl.setCorrelationDirection(correlationDirection)
if expression_data_format == 'non-log':
logTransform = True
else:
logTransform = False
if 'topSplice' in input_exp_file:
markerFinder.filterRNASeqSpliceEvents(species, platform, fl, input_exp_file)
sys.exit()
if 'stats.' in input_exp_file:
markerFinder.filterDetectionPvalues(species, platform, fl, input_exp_file)
sys.exit()
else:
markerFinder.analyzeData(group_exp_file, species, platform, compendiumType,
geneToReport=genesToReport, correlateAll=correlateAll,
AdditionalParameters=fl, logTransform=logTransform)
try:
fl.setVendor(manufacturer)
except Exception:
print '--vendor not indicated by user... assuming Affymetrix'
fl.setVendor('Affymetrix')
try:
markerFinder.generateMarkerHeatMaps(fl, array_type, convertNonLogToLog=logTransform)
except Exception:
print traceback.format_exc()
print 'Cell/Tissue marker classification analysis finished';
sys.exit()
if 'EnsMart' in ensembl_version:
UI.exportDBversion(ensembl_version)
annotation_found = verifyFile(input_annotation_file)
proceed = 'no'
if 'Official' not in update_method and denom_file_dir == None: ### If running GO-Elite independent of AltAnalyze (see below GO_Elite call)
try:
time_stamp = timestamp()
if len(cel_file_dir) > 0:
if output_dir == None:
output_dir = cel_file_dir
print "Setting output directory to the input path:", output_dir
                if output_dir == None and len(input_filtered_dir) > 0:
output_dir = input_filtered_dir
if '/' == output_dir[-1] or '\\' in output_dir[-2]:
null = []
else:
output_dir += '/'
log_file = filepath(output_dir + 'AltAnalyze_report-' + time_stamp + '.log')
log_report = open(log_file, 'w');
log_report.close()
sys.stdout = Logger('')
except Exception, e:
print e
                print 'Please designate an output directory before proceeding (e.g., --output "C:\RNASeq")';
sys.exit()
if mappedExonAnalysis:
array_type = 'RNASeq' ### Although this is not the actual platform, the resulting data will be treated as RNA-Seq with parameters most suitable for arrays
if len(external_annotation_dir) > 0:
run_from_scratch = 'Annotate External Results'
if channel_to_extract != None:
run_from_scratch = 'Process Feature Extraction files' ### Agilent Feature Extraction files as input for normalization
manufacturer = 'Agilent'
constitutive_source = 'Agilent'
expression_threshold = 'NA'
perform_alt_analysis = 'NA'
if len(input_filtered_dir) > 0:
run_from_scratch = 'Process AltAnalyze filtered';
proceed = 'yes'
if len(input_exp_file) > 0:
run_from_scratch = 'Process Expression file';
proceed = 'yes'
            input_exp_file = string.replace(input_exp_file, '\\', '/') ### Windows convention is \ rather than /, but / works as well
ief_list = string.split(input_exp_file, '/')
if len(output_dir) > 0:
parent_dir = output_dir
else:
parent_dir = string.join(ief_list[:-1], '/')
exp_name = ief_list[-1]
if len(cel_file_dir) > 0 or runKallisto == True:
# python AltAnalyze.py --species Mm --platform RNASeq --runKallisto yes --expname test
if exp_name == None:
print "No experiment name defined. Please sumbit a name (e.g., --expname CancerComp) before proceeding.";
sys.exit()
else:
dataset_name = 'exp.' + exp_name + '.txt';
exp_file_dir = filepath(output_dir + '/ExpressionInput/' + dataset_name)
if runKallisto:
                run_from_scratch = 'Process RNA-seq reads'
elif run_from_scratch != 'Process Feature Extraction files':
run_from_scratch = 'Process CEL files';
proceed = 'yes'
if array_type == 'RNASeq':
file_ext = '.BED'
else:
file_ext = '.CEL'
try:
cel_files, cel_files_fn = UI.identifyCELfiles(cel_file_dir, array_type, manufacturer)
except Exception, e:
print e
if mappedExonAnalysis:
pass
else:
print "No", file_ext, "files found in the directory:", cel_file_dir;sys.exit()
if array_type != 'RNASeq': cel_file_list_dir = UI.exportCELFileList(cel_files_fn, cel_file_dir)
if groups_file != None and comps_file != None:
try:
export.copyFile(groups_file, string.replace(exp_file_dir, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(exp_file_dir, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file = string.replace(exp_file_dir, 'exp.', 'comps.')
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
if array_type != 'RNASeq' and manufacturer != 'Agilent':
"""Determine if Library and Annotations for the array exist, if not, download or prompt for selection"""
try:
                ### For the HGLUE and HJAY arrays, this step is critical in order to have the command-line AltAnalyze download the appropriate junction database (determined from specific_array_type)
specific_array_types, specific_array_type = UI.identifyArrayType(cel_files_fn)
num_array_types = len(specific_array_types)
except Exception:
null = [];
num_array_types = 1;
specific_array_type = None
if array_type == 'exon':
if species == 'Hs': specific_array_type = 'HuEx-1_0-st-v2'
if species == 'Mm': specific_array_type = 'MoEx-1_0-st-v2'
if species == 'Rn': specific_array_type = 'RaEx-1_0-st-v2'
elif array_type == 'gene':
if species == 'Hs': specific_array_type = 'HuGene-1_0-st-v1'
if species == 'Mm': specific_array_type = 'MoGene-1_0-st-v1'
if species == 'Rn': specific_array_type = 'RaGene-1_0-st-v1'
elif array_type == 'AltMouse':
specific_array_type = 'altMouseA'
"""
elif array_type == 'junction':
if species == 'Mm': specific_array_type = 'MJAY'
if species == 'Hs': specific_array_type = 'HJAY'
"""
supported_array_db = UI.importSupportedArrayInfo()
if specific_array_type in supported_array_db and input_cdf_file == None and input_annotation_file == None:
sa = supported_array_db[specific_array_type];
species = sa.Species();
array_type = sa.ArrayType()
input_cdf_file, input_annotation_file, bgp_file, clf_file = UI.getAffyFilesRemote(specific_array_type,
array_type, species)
else:
array_type = "3'array"
cdf_found = verifyFile(input_cdf_file)
annotation_found = verifyFile(input_annotation_file)
if input_cdf_file == None:
print [specific_array_type], 'not currently supported... Please provide CDF to AltAnalyze (command-line or GUI) or manually add to AltDatabase/affymetrix/LibraryFiles';
sys.exit()
if cdf_found != "found":
### Copy valid Library files to a local AltAnalyze database directory
input_cdf_file_lower = string.lower(input_cdf_file)
if array_type == "3'array":
if '.cdf' in input_cdf_file_lower:
clf_file = '';
bgp_file = '';
assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_cdf_file, '/');
cdf_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
destination_parent = osfilepath(destination_parent + cdf_short)
info_list = input_cdf_file, destination_parent;
UI.StatusWindow(info_list, 'copy')
else:
print "Valid CDF file not found. Exiting program.";sys.exit()
else:
if '.pgf' in input_cdf_file_lower:
###Check to see if the clf and bgp files are present in this directory
icf_list = string.split(input_cdf_file, '/');
parent_dir = string.join(icf_list[:-1], '/');
cdf_short = icf_list[-1]
clf_short = string.replace(cdf_short, '.pgf', '.clf')
kil_short = string.replace(cdf_short, '.pgf', '.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_short = string.replace(cdf_short, '.pgf', '.antigenomic.bgp')
else:
bgp_short = string.replace(cdf_short, '.pgf', '.bgp')
dir_list = read_directory(parent_dir)
if clf_short in dir_list and bgp_short in dir_list:
pgf_file = input_cdf_file
clf_file = string.replace(pgf_file, '.pgf', '.clf')
kil_file = string.replace(pgf_file, '.pgf', '.kil') ### Only applies to the Glue array
if array_type == 'exon' or array_type == 'junction':
bgp_file = string.replace(pgf_file, '.pgf', '.antigenomic.bgp')
else:
bgp_file = string.replace(pgf_file, '.pgf', '.bgp')
assinged = 'yes'
###Thus the CDF or PGF file was confirmed, so copy it over to AltDatabase
destination_parent = 'AltDatabase/affymetrix/LibraryFiles/'
info_list = input_cdf_file, osfilepath(destination_parent + cdf_short);
UI.StatusWindow(info_list, 'copy')
info_list = clf_file, osfilepath(destination_parent + clf_short);
UI.StatusWindow(info_list, 'copy')
info_list = bgp_file, osfilepath(destination_parent + bgp_short);
UI.StatusWindow(info_list, 'copy')
if 'Glue' in pgf_file:
info_list = kil_file, osfilepath(destination_parent + kil_short);
UI.StatusWindow(info_list, 'copy')
if annotation_found != "found" and update_dbs == 'no' and array_type != 'RNASeq' and denom_file_dir == None and manufacturer != 'Agilent':
### Copy valid Annotation files to a local AltAnalyze database directory
try:
input_annotation_lower = string.lower(input_annotation_file)
if '.csv' in input_annotation_lower:
assinged = 'yes'
###Thus the annotation CSV file was confirmed, so copy it over to AltDatabase
icf_list = string.split(input_annotation_file, '/');
csv_short = icf_list[-1]
destination_parent = 'AltDatabase/affymetrix/' + species + '/'
info_list = input_annotation_file, filepath(destination_parent + csv_short);
UI.StatusWindow(info_list, 'copy')
except Exception:
print "No Affymetrix annotation file provided. AltAnalyze will use any .csv annotation files in AltDatabase/Affymetrix/" + species
if 'Official' in update_method and species != None:
proceed = 'yes'
elif array_type != None and species != None:
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults(array_type,
species)
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
use_direct_domain_alignments_only, microRNA_prediction_method = functional_analysis_defaults
analysis_method, additional_algorithms, filter_probeset_types, analyze_all_conditions, p_threshold, alt_exon_fold_variable, additional_score, permute_p_threshold, gene_expression_cutoff, remove_intronic_junctions, perform_permutation_analysis, export_NI_values, run_MiDAS, calculate_normIntensity_p, filter_for_AS = alt_exon_defaults
dabg_p, rpkm_threshold, gene_exp_threshold, exon_exp_threshold, exon_rpkm_threshold, expression_threshold, perform_alt_analysis, analyze_as_groups, expression_data_format, normalize_feature_exp, normalize_gene_data, avg_all_for_ss, include_raw_data, probability_statistic, FDR_statistic, batch_effects, marker_finder, visualize_qc_results, run_lineage_profiler, null = expr_defaults
elif denom_file_dir != None and species != None:
proceed = 'yes' ### Only run GO-Elite
expr_defaults, alt_exon_defaults, functional_analysis_defaults, goelite_defaults = UI.importDefaults('RNASeq',
species) ### platform not relevant
ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, ORA_algorithm, resources_to_analyze, goelite_permutations, mod, returnPathways, NA = goelite_defaults
else:
print 'No species defined. Please include the species code (e.g., "--species Hs") and array type (e.g., "--arraytype exon") before proceeding.'
print '\nAlso check the printed arguments above to see if there are formatting errors, such as bad quotes.';
sys.exit()
array_type_original = array_type
#if array_type == 'gene': array_type = "3'array"
for opt, arg in options:
if opt == '--runGOElite':
run_GOElite = arg
elif opt == '--outputQCPlots':
visualize_qc_results = arg
elif opt == '--runLineageProfiler':
run_lineage_profiler = arg
elif opt == '--elitepermut':
goelite_permutations = arg
elif opt == '--method':
filter_method = arg
elif opt == '--zscore':
z_threshold = arg
elif opt == '--elitepval':
p_val_threshold = arg
elif opt == '--num':
change_threshold = arg
elif opt == '--dataToAnalyze':
resources_to_analyze = arg
elif opt == '--GEelitepval':
ge_pvalue_cutoffs = arg
elif opt == '--GEelitefold':
ge_fold_cutoffs = arg
elif opt == '--GEeliteptype':
ge_ptype = arg
elif opt == '--ORAstat':
ORA_algorithm = arg
elif opt == '--returnPathways':
returnPathways = arg
elif opt == '--FDR':
FDR_statistic = arg
elif opt == '--dabgp':
dabg_p = arg
elif opt == '--rawexp':
expression_threshold = arg
elif opt == '--geneRPKM':
rpkm_threshold = arg
elif opt == '--exonRPKM':
exon_rpkm_threshold = arg
elif opt == '--geneExp':
gene_exp_threshold = arg
elif opt == '--exonExp':
exon_exp_threshold = arg
elif opt == '--groupStat':
probability_statistic = arg
elif opt == '--avgallss':
avg_all_for_ss = arg
elif opt == '--logexp':
expression_data_format = arg
elif opt == '--inclraw':
include_raw_data = arg
elif opt == '--combat':
batch_effects = arg
elif opt == '--runalt':
perform_alt_analysis = arg
elif opt == '--altmethod':
analysis_method = arg
elif opt == '--altp':
p_threshold = arg
elif opt == '--probetype':
filter_probeset_types = arg
elif opt == '--altscore':
alt_exon_fold_variable = arg
elif opt == '--GEcutoff':
gene_expression_cutoff = arg
elif opt == '--removeIntronOnlyJunctions':
remove_intronic_junctions = arg
elif opt == '--normCounts':
normalize_feature_exp = arg
elif opt == '--normMatrix':
normalize_gene_data = arg
elif opt == '--altpermutep':
permute_p_threshold = arg
elif opt == '--altpermute':
perform_permutation_analysis = arg
elif opt == '--exportnormexp':
export_NI_values = arg
elif opt == '--buildExonExportFile':
build_exon_bedfile = 'yes'
elif opt == '--runMarkerFinder':
marker_finder = arg
elif opt == '--calcNIp':
calculate_normIntensity_p = arg
elif opt == '--runMiDAS':
run_MiDAS = arg
elif opt == '--analyzeAllGroups':
analyze_all_conditions = arg
if analyze_all_conditions == 'yes': analyze_all_conditions = 'all groups'
elif opt == '--GEcutoff':
use_direct_domain_alignments_only = arg
elif opt == '--mirmethod':
microRNA_prediction_method = arg
elif opt == '--ASfilter':
filter_for_AS = arg
elif opt == '--noxhyb':
xhyb_remove = arg
elif opt == '--returnAll':
return_all = arg
elif opt == '--annotatedir':
external_annotation_dir = arg
elif opt == '--additionalScore':
additional_score = arg
elif opt == '--additionalAlgorithm':
additional_algorithms = arg
elif opt == '--modelSize':
modelSize = arg
try:
modelSize = int(modelSize)
except Exception:
modelSize = None
elif opt == '--geneModel':
geneModel = arg # file location
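### The substring check below ('alse') matches both 'False' and 'false' supplied on the command line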
if geneModel == 'no' or 'alse' in geneModel:
geneModel = False
elif opt == '--reference':
custom_reference = arg
if run_from_scratch == 'Process Feature Extraction files': ### Agilent Feature Extraction files as input for normalization
normalize_gene_data = 'quantile' ### required for Agilent
proceed = 'yes'
if returnPathways == 'no' or returnPathways == 'None':
returnPathways = None
if pipelineAnalysis == False:
proceed = 'yes'
if proceed == 'yes':
species_codes = UI.remoteSpeciesInfo()
### Update Ensembl Databases
if 'Official' in update_method:
file_location_defaults = UI.importDefaultFileLocations()
db_versions_vendors, db_versions = UI.remoteOnlineDatabaseVersions()
array_codes = UI.remoteArrayInfo()
UI.getOnlineDBConfig(file_location_defaults, '')
if len(species) == 2:
species_names = UI.getSpeciesInfo()
species_full = species_names[species]
else:
species_full = species
print 'Species name to update:', species_full
db_version_list = []
for version in db_versions: db_version_list.append(version)
db_version_list.sort();
db_version_list.reverse();
select_version = db_version_list[0]
db_versions[select_version].sort()
print 'Ensembl version', ensembl_version
if ensembl_version != 'current':
if len(ensembl_version) < 4: ensembl_version = 'EnsMart' + ensembl_version
if ensembl_version not in db_versions:
try:
UI.getOnlineEliteDatabase(file_location_defaults, ensembl_version, [species], 'no',
''); sys.exit()
except Exception:
### This is only for databases that aren't officially released yet for prototyping
print ensembl_version, 'is not a valid version of Ensembl, while', select_version, 'is.';
sys.exit()
else:
select_version = ensembl_version
### Export basic species information
sc = species;
db_version = ensembl_version
if sc != None:
for ad in db_versions_vendors[db_version]:
if ad.SpeciesCodes() == species_full:
for array_system in array_codes:
ac = array_codes[array_system]
compatible_species = ac.SpeciesCodes()
if ac.Manufacturer() in ad.Manufacturer() and (
'expression' in ac.ArrayName() or 'RNASeq' in ac.ArrayName() or 'RNA-seq' in ac.ArrayName()):
if sc not in compatible_species: compatible_species.append(sc)
ac.setSpeciesCodes(compatible_species)
UI.exportArrayInfo(array_codes)
if species_full not in db_versions[select_version]:
print db_versions[select_version]
print species_full, ': This species is not available for this version %s of the Official database.' % select_version
else:
update_goelite_resources = 'no' ### This is handled separately below
UI.getOnlineEliteDatabase(file_location_defaults, ensembl_version, [species], update_goelite_resources,
'');
### Attempt to download additional Ontologies and GeneSets
if additional_resources[0] != None: ### Indicates that the user requested the download of additional GO-Elite resources
try:
import GeneSetDownloader
print 'Adding supplemental GeneSet and Ontology Collections'
if 'all' in additional_resources:
additionalResources = UI.importResourceList() ### Gets all additional possible resources
else:
additionalResources = additional_resources
GeneSetDownloader.buildAccessoryPathwayDatabases([species], additionalResources, 'yes')
print 'Finished adding additional analysis resources.'
except Exception:
print 'Download error encountered for additional Ontologies and GeneSets...\nplease try again later.'
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(species, array_type, specific_array_type, 'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';
sys.exit()
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
print "Finished adding database"
sys.exit()
try:
#print ge_fold_cutoffs,ge_pvalue_cutoffs, change_threshold, resources_to_analyze, goelite_permutations, p_val_threshold, z_threshold
change_threshold = int(change_threshold) - 1
goelite_permutations = int(goelite_permutations);
change_threshold = change_threshold
p_val_threshold = float(p_val_threshold);
z_threshold = float(z_threshold)
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
except Exception, e:
print e
print 'One of the GO-Elite input values is inappropriate. Please review and correct.';
sys.exit()
if run_GOElite == None or run_GOElite == 'no':
goelite_permutations = 'NA' ### This halts GO-Elite from running
else:
if output_dir == None:
print "\nPlease specify an output directory using the flag --output";
sys.exit()
try:
expression_threshold = float(expression_threshold)
except Exception:
expression_threshold = 1
try:
dabg_p = float(dabg_p)
except Exception:
dabg_p = 1 ### Occurs for RNASeq
if microRNA_prediction_method == 'two or more':
microRNA_prediction_method = 'multiple'
else:
microRNA_prediction_method = 'any'
### Run GO-Elite directly from user supplied input and denominator ID folders (outside of the normal workflows)
if run_GOElite == 'yes' and pipelineAnalysis == False and '--runGOElite' in arguments:# and denom_file_dir != None:
#python AltAnalyze.py --input "/Users/nsalomonis/Desktop/Mm_sample/input_list_small" --runGOElite yes --denom "/Users/nsalomonis/Desktop/Mm_sample/denominator" --mod Ensembl --species Mm
"""if denom_file_dir == None:
print 'Please include a folder containing a valid denominator ID list for the input ID sets.'; sys.exit()"""
try:
if output_dir == None:
### Set output to the same directory or parent if none selected
i = -1 ### 1 directory up
output_dir = string.join(string.split(input_file_dir, '/')[:i], '/')
file_dirs = input_file_dir, denom_file_dir, output_dir
import GO_Elite
if ORA_algorithm == 'Fisher Exact Test':
goelite_permutations = 'FisherExactTest'
goelite_var = species, mod, goelite_permutations, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, returnPathways, file_dirs, ''
GO_Elite.remoteAnalysis(goelite_var, 'non-UI', Multi=mlp)
sys.exit()
except Exception:
print traceback.format_exc()
print "Unexpected error encountered. Please see log file.";
sys.exit()
if run_lineage_profiler == 'yes':
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...'
if run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
#python AltAnalyze.py --input "/Users/arrays/test.txt" --runLineageProfiler yes --vendor Affymetrix --platform "3'array" --species Mm --output "/Users/nsalomonis/Merrill"
#python AltAnalyze.py --input "/Users/qPCR/samples.txt" --runLineageProfiler yes --geneModel "/Users/qPCR/models.txt"
if array_type == None:
print "Please include a platform name (e.g., --platform RNASeq)";
sys.exit()
if species == None:
print "Please include a species name (e.g., --species Hs)";
sys.exit()
try:
status = UI.verifyLineageProfilerDatabases(species, 'command-line')
except ValueError:
### Occurs due to if int(gene_database[-2:]) < 65: - ValueError: invalid literal for int() with base 10: ''
print '\nPlease install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65\n';
sys.exit()
if status == False:
print 'Please note: LineageProfiler not currently supported for this species...';
sys.exit()
try:
fl = UI.ExpressionFileLocationData('', '', '', '')
fl.setSpecies(species)
fl.setVendor(manufacturer)
fl.setPlatformType(array_type)
fl.setCompendiumType('protein_coding')
#fl.setCompendiumType('AltExon')
fl.setCompendiumPlatform(array_type)
try:
expr_input_dir
except Exception:
expr_input_dir = input_file_dir
UI.remoteLP(fl, expr_input_dir, manufacturer, custom_reference, geneModel, None, modelSize=modelSize)
#graphic_links = ExpressionBuilder.remoteLineageProfiler(fl,input_file_dir,array_type,species,manufacturer)
print_out = 'Lineage profiles and images saved to the folder "DataPlots" in the input file folder.'
print print_out
except Exception:
print traceback.format_exc()
print_out = 'Analysis error occurred...\nplease see warning printouts.'
print print_out
sys.exit()
if array_type == 'junction' or array_type == 'RNASeq': ### Download junction databases
try:
UI.checkForLocalArraySupport(species, array_type, specific_array_type, 'command-line')
except Exception:
print 'Please install a valid gene database before proceeding.\n'
print 'For example: python AltAnalyze.py --species Hs --update Official --version EnsMart65';
sys.exit()
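### The block below coerces the alternative-exon thresholds to numeric values and
### rejects out-of-range settings before the main analysis runs; --returnAll instead
### substitutes permissive values so no alternative-exon filtering is applied.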
probeset_types = ['full', 'core', 'extended']
if return_all == 'yes': ### Perform no alternative exon filtering when annotating existing FIRMA or MADS results
dabg_p = 1;
expression_threshold = 1;
p_threshold = 1;
alt_exon_fold_variable = 1
gene_expression_cutoff = 10000;
filter_probeset_types = 'full';
exon_exp_threshold = 1;
rpkm_threshold = 0
gene_exp_threshold = 1;
exon_rpkm_threshold = 0
if array_type == 'RNASeq':
gene_exp_threshold = 0
else:
if array_type != "3'array":
try:
p_threshold = float(p_threshold);
alt_exon_fold_variable = float(alt_exon_fold_variable)
expression_threshold = float(expression_threshold);
gene_expression_cutoff = float(gene_expression_cutoff)
dabg_p = float(dabg_p);
additional_score = float(additional_score)
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
try:
gene_expression_cutoff = float(gene_expression_cutoff)
except Exception:
gene_expression_cutoff = 0
try:
rpkm_threshold = float(rpkm_threshold)
except Exception:
rpkm_threshold = -1
try:
exon_exp_threshold = float(exon_exp_threshold)
except Exception:
exon_exp_threshold = 0
try:
gene_exp_threshold = float(gene_exp_threshold)
except Exception:
gene_exp_threshold = 0
try:
exon_rpkm_threshold = float(exon_rpkm_threshold)
except Exception:
exon_rpkm_threshold = 0
if filter_probeset_types not in probeset_types and array_type == 'exon':
print "Invalid probeset-type entered:", filter_probeset_types, '. Must be "full", "extended" or "core"';
sys.exit()
elif array_type == 'gene' and filter_probeset_types == 'NA':
filter_probeset_types = 'core'
if dabg_p > 1 or dabg_p <= 0:
print "Invalid DABG p-value entered:", dabg_p, '. Must be > 0 and <= 1';
sys.exit()
if expression_threshold < 1:
print "Invalid expression threshold entered:", expression_threshold, '. Must be > 1';
sys.exit()
if p_threshold > 1 or p_threshold <= 0:
print "Invalid alternative exon p-value entered:", p_threshold, '. Must be > 0 and <= 1';
sys.exit()
if alt_exon_fold_variable < 1 and analysis_method != 'ASPIRE':
print "Invalid alternative exon threshold entered:", alt_exon_fold_variable, '. Must be > 1';
sys.exit()
if gene_expression_cutoff < 1:
print "Invalid gene expression threshold entered:", gene_expression_cutoff, '. Must be > 1';
sys.exit()
if additional_score < 1:
print "Invalid additional score threshold entered:", additional_score, '. Must be > 1';
sys.exit()
if array_type == 'RNASeq':
if rpkm_threshold < 0:
print "Invalid gene RPKM threshold entered:", rpkm_threshold, '. Must be >= 0';
sys.exit()
if exon_exp_threshold < 1:
print "Invalid exon expression threshold entered:", exon_exp_threshold, '. Must be > 1';
sys.exit()
if exon_rpkm_threshold < 0:
print "Invalid exon RPKM threshold entered:", exon_rpkm_threshold, '. Must be >= 0';
sys.exit()
if gene_exp_threshold < 1:
print "Invalid gene expression threshold entered:", gene_exp_threshold, '. Must be > 1';
sys.exit()
if 'FIRMA' in additional_algorithms and array_type == 'RNASeq':
print 'FIRMA is not an available option for RNASeq... Changing this to splicing-index.'
additional_algorithms = 'splicing-index'
additional_algorithms = UI.AdditionalAlgorithms(additional_algorithms);
additional_algorithms.setScore(additional_score)
if array_type == 'RNASeq':
manufacturer = 'RNASeq'
if 'CEL' in run_from_scratch: run_from_scratch = 'Process RNA-seq reads'
if build_exon_bedfile == 'yes': run_from_scratch = 'buildExonExportFiles'
if run_from_scratch == 'Process AltAnalyze filtered': expression_data_format = 'log' ### This is switched to log no matter what, after initial import and analysis of CEL or BED files
### These variables are modified from the defaults in the module UI as below
excludeNonExpExons = True
if avg_all_for_ss == 'yes':
avg_all_for_ss = 'yes'
elif 'all exon aligning' in avg_all_for_ss or 'known exons' in avg_all_for_ss or 'expressed exons' in avg_all_for_ss:
if 'known exons' in avg_all_for_ss and array_type == 'RNASeq': excludeNonExpExons = False
avg_all_for_ss = 'yes'
else:
avg_all_for_ss = 'no'
if run_MiDAS == 'NA': run_MiDAS = 'no'
if perform_alt_analysis == 'yes':
perform_alt_analysis = 'yes'
elif perform_alt_analysis == 'expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'just expression':
perform_alt_analysis = 'expression'
elif perform_alt_analysis == 'no':
perform_alt_analysis = 'expression'
elif platform != "3'array":
perform_alt_analysis = 'both'
if systemToUse != None: array_type = systemToUse
try:
permute_p_threshold = float(permute_p_threshold)
except Exception:
permute_p_threshold = permute_p_threshold
### Store variables for AltAnalyzeMain
expr_var = species, array_type, manufacturer, constitutive_source, dabg_p, expression_threshold, avg_all_for_ss, expression_data_format, include_raw_data, run_from_scratch, perform_alt_analysis
alt_var = analysis_method, p_threshold, filter_probeset_types, alt_exon_fold_variable, gene_expression_cutoff, remove_intronic_junctions, permute_p_threshold, perform_permutation_analysis, export_NI_values, analyze_all_conditions
additional_var = calculate_normIntensity_p, run_MiDAS, use_direct_domain_alignments_only, microRNA_prediction_method, filter_for_AS, additional_algorithms
goelite_var = ge_fold_cutoffs, ge_pvalue_cutoffs, ge_ptype, filter_method, z_threshold, p_val_threshold, change_threshold, resources_to_analyze, goelite_permutations, mod, returnPathways
if run_from_scratch == 'buildExonExportFiles':
fl = UI.ExpressionFileLocationData('', '', '', '');
fl.setExonBedBuildStatus('yes');
fl.setFeatureNormalization('none')
fl.setCELFileDir(cel_file_dir);
fl.setArrayType(array_type);
fl.setOutputDir(output_dir)
fl.setMultiThreading(multiThreading)
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl;
parent_dir = output_dir
perform_alt_analysis = 'expression'
if run_from_scratch == 'Process Expression file':
if len(input_exp_file) > 0:
if groups_file != None and comps_file != None:
if 'exp.' in input_exp_file:
new_exp_file = input_exp_file
else:
new_exp_file = export.findParentDir(input_exp_file) + 'exp.' + export.findFilename(
input_exp_file)
if 'ExpressionInput' not in new_exp_file:
### This expression file is not currently used (could make it the default after copying to this location)
if output_dir[-1] != '/' and output_dir[-1] != '\\':
output_dir += '/'
new_exp_file = output_dir + 'ExpressionInput/' + export.findFilename(new_exp_file)
try:
export.copyFile(input_exp_file, new_exp_file)
except Exception:
print 'Expression file already present in target location.'
try:
export.copyFile(groups_file, string.replace(new_exp_file, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
export.copyFile(comps_file, string.replace(new_exp_file, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
groups_file = string.replace(new_exp_file, 'exp.', 'groups.')
comps_file = string.replace(new_exp_file, 'exp.', 'comps.')
input_exp_file = new_exp_file
if verifyGroupFileFormat(groups_file) == False:
print "\nWarning! The format of your groups file is not correct. For details, see:\nhttp://code.google.com/p/altanalyze/wiki/ManualGroupsCompsCreation\n"
sys.exit()
try:
cel_files, array_linker_db = ExpressionBuilder.getArrayHeaders(input_exp_file)
if len(input_stats_file) > 1: ###Make sure the files have the same arrays and order first
cel_files2, array_linker_db2 = ExpressionBuilder.getArrayHeaders(input_stats_file)
if cel_files2 != cel_files:
print "The probe set p-value file:\n" + input_stats_file + "\ndoes not have the same array order as the\nexpression file. Correct before proceeding.";
sys.exit()
except Exception:
print '\nWARNING...Expression file not found: "' + input_exp_file + '"\n\n'; sys.exit()
exp_name = string.replace(exp_name, 'exp.', '');
dataset_name = exp_name;
exp_name = string.replace(exp_name, '.txt', '')
groups_name = 'ExpressionInput/groups.' + dataset_name;
comps_name = 'ExpressionInput/comps.' + dataset_name
groups_file_dir = output_dir + '/' + groups_name;
comps_file_dir = output_dir + '/' + comps_name
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and groups_found != 'found'):
files_exported = UI.predictGroupsAndComps(cel_files, output_dir, exp_name)
if files_exported == 'yes':
print "AltAnalyze inferred a groups and comps file from the CEL file names."
elif run_lineage_profiler == 'yes' and input_file_dir != None and pipelineAnalysis == False and '--runLineageProfiler' in arguments:
pass
else:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(input_exp_file, input_stats_file, groups_file_dir, comps_file_dir)
dataset_name = exp_name
if analyze_all_conditions == "all groups":
try:
array_group_list, group_db = UI.importArrayGroupsSimple(groups_file_dir, cel_files)
except Exception:
print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';
sys.exit()
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
exp_file_location_db = {};
exp_file_location_db[exp_name] = fl
elif run_from_scratch == 'Process CEL files' or run_from_scratch == 'Process RNA-seq reads' or run_from_scratch == 'Process Feature Extraction files':
if groups_file != None and comps_file != None:
try:
shutil.copyfile(groups_file, string.replace(exp_file_dir, 'exp.', 'groups.'))
except Exception:
print 'Groups file already present in target location OR bad input path.'
try:
shutil.copyfile(comps_file, string.replace(exp_file_dir, 'exp.', 'comps.'))
except Exception:
print 'Comparison file already present in target location OR bad input path.'
stats_file_dir = string.replace(exp_file_dir, 'exp.', 'stats.')
groups_file_dir = string.replace(exp_file_dir, 'exp.', 'groups.')
comps_file_dir = string.replace(exp_file_dir, 'exp.', 'comps.')
groups_found = verifyFile(groups_file_dir)
comps_found = verifyFile(comps_file_dir)
if ((groups_found != 'found' or comps_found != 'found') and analyze_all_conditions != 'all groups') or (
analyze_all_conditions == 'all groups' and groups_found != 'found'):
if mappedExonAnalysis:
pass
else:
files_exported = UI.predictGroupsAndComps(cel_files, output_dir, exp_name)
if files_exported == 'yes': print "AltAnalyze inferred a groups and comps file from the CEL file names."
#else: print '...groups and comps files not found. Create before running AltAnalyze in command line mode.';sys.exit()
fl = UI.ExpressionFileLocationData(exp_file_dir, stats_file_dir, groups_file_dir, comps_file_dir)
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl
parent_dir = output_dir ### interchangeable terms (parent_dir used with expression file import)
if analyze_all_conditions == "all groups":
array_group_list, group_db = UI.importArrayGroupsSimple(groups_file_dir, cel_files)
UI.exportGroups(exp_file_location_db, array_group_list)
print len(group_db), 'groups found'
if len(group_db) == 2: analyze_all_conditions = 'pairwise'
try:
fl.setRunKallisto(input_fastq_dir)
except Exception:
pass
elif run_from_scratch == 'Process AltAnalyze filtered':
if '.txt' in input_filtered_dir: ### Occurs if the user tries to load a specific file
dirs = string.split(input_filtered_dir, '/')
input_filtered_dir = string.join(dirs[:-1], '/')
fl = UI.ExpressionFileLocationData('', '', '', '');
dataset_name = 'filtered-exp_dir'
dirs = string.split(input_filtered_dir, 'AltExpression');
parent_dir = dirs[0]
exp_file_location_db = {};
exp_file_location_db[dataset_name] = fl
for dataset in exp_file_location_db:
fl = exp_file_location_db[dataset_name]
file_location_defaults = UI.importDefaultFileLocations()
apt_location = UI.getAPTLocations(file_location_defaults, run_from_scratch, run_MiDAS)
fl.setAPTLocation(apt_location)
if run_from_scratch == 'Process CEL files':
if xhyb_remove == 'yes' and (
array_type == 'gene' or array_type == 'junction'): xhyb_remove = 'no' ### This is set when the user mistakenly selects exon array, initially
fl.setInputCDFFile(input_cdf_file);
fl.setCLFFile(clf_file);
fl.setBGPFile(bgp_file);
fl.setXHybRemoval(xhyb_remove)
fl.setCELFileDir(cel_file_dir);
fl.setArrayType(array_type_original);
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process RNA-seq reads':
fl.setCELFileDir(cel_file_dir);
fl.setOutputDir(output_dir)
elif run_from_scratch == 'Process Feature Extraction files':
fl.setCELFileDir(cel_file_dir);
fl.setOutputDir(output_dir)
fl = exp_file_location_db[dataset];
fl.setRootDir(parent_dir)
apt_location = fl.APTLocation()
root_dir = fl.RootDir();
fl.setExonBedBuildStatus(build_exon_bedfile)
fl.setMarkerFinder(marker_finder)
fl.setFeatureNormalization(normalize_feature_exp)
fl.setNormMatrix(normalize_gene_data)
fl.setProbabilityStatistic(probability_statistic)
fl.setProducePlots(visualize_qc_results)
fl.setPerformLineageProfiler(run_lineage_profiler)
fl.setCompendiumType(compendiumType)
fl.setCompendiumPlatform(compendiumPlatform)
fl.setVendor(manufacturer)
try:
fl.setFDRStatistic(FDR_statistic)
except Exception:
pass
fl.setAnalysisMode('commandline')
fl.setBatchEffectRemoval(batch_effects)
fl.setChannelToExtract(channel_to_extract)
fl.setMultiThreading(multiThreading)
try:
fl.setExcludeLowExpressionExons(excludeNonExpExons)
except Exception:
fl.setExcludeLowExpressionExons(True)
if 'other' in manufacturer or 'Other' in manufacturer:
### For data without a primary array ID key
manufacturer = "other:3'array"
fl.setVendor(manufacturer)
if array_type == 'RNASeq': ### Post version 2.0, add variables in fl rather than below
fl.setRPKMThreshold(rpkm_threshold)
fl.setExonExpThreshold(exon_exp_threshold)
fl.setGeneExpThreshold(gene_exp_threshold)
fl.setExonRPKMThreshold(exon_rpkm_threshold)
fl.setJunctionExpThreshold(expression_threshold)
fl.setExonMapFile(exonMapFile)
fl.setPlatformType(platformType)
### Verify database presence
try:
dirs = unique.read_directory('/AltDatabase')
except Exception:
dirs = []
if species not in dirs:
print '\n' + species, 'species not yet installed. Please install before proceeding (e.g., "python AltAnalyze.py --update Official --species', species, '--version EnsMart65").'
global commandLineMode;
commandLineMode = 'yes'
AltAnalyzeMain(expr_var, alt_var, goelite_var, additional_var, exp_file_location_db, None)
else:
print 'Insufficient Flags entered (requires --species and --output)'
def cleanUpCommandArguments():
### Needed on PC
command_args = string.join(sys.argv, ' ')
arguments = string.split(command_args, ' --')
for argument in arguments:
"""
argument_list = string.split(argument,' ')
if len(argument_list)>2:
filename = string.join(argument_list[1:],' ')
argument = argument_list[0]+' '+string.replace(filename,' ','$$$')
"""
argument_list = string.split(argument, ' ')
#argument = string.join(re.findall(r"\w",argument),'')
if ':' in argument: ### Windows OS
z = string.find(argument_list[1], ':')
if z != -1 and z != 1: ### Hence, it is in the argument but not at the second position
print 'Illegal parentheses found. Please re-type these and re-run.';
sys.exit()
def runCommandLineVersion():
### This code had to be moved to a separate function to prevent iterative runs upon AltAnalyze.py re-import
command_args = string.join(sys.argv, ' ')
#try: cleanUpCommandArguments()
#except Exception: null=[]
#print [command_args];sys.exit()
if len(sys.argv[1:]) > 0 and '--' in command_args:
if '--GUI' in command_args:
AltAnalyzeSetup(
'no') ### a trick to get back to the main page of the GUI (if AltAnalyze has Tkinter conflict)
try:
commandLineRun()
except Exception:
print traceback.format_exc()
###### Determine Command Line versus GUI Control ######
command_args = string.join(sys.argv, ' ')
if len(sys.argv[1:]) > 1 and '-' in command_args:
null = []
else:
try:
import Tkinter
from Tkinter import *
import PmwFreeze
import tkFileDialog
from tkFont import Font
use_Tkinter = 'yes'
except ImportError:
use_Tkinter = 'no'; print "\nPmw or Tkinter not found... Tkinter print out not available";
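### Developer helper: builds a dummy summary_db and graphic links so the
### SummaryResultsWindow UI can be exercised without running a full analysis.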
def testResultsPanel():
file = "/Users/nsalomonis/Desktop/code/AltAnalyze/datasets/3'Array/Merrill/ExpressionInput/exp.test.txt"
#QC.outputArrayQC(file)
global root;
root = Tk()
global pathway_permutations;
pathway_permutations = 'NA'
global log_file;
log_file = 'null.txt'
global array_type;
global explicit_data_type
global run_GOElite;
run_GOElite = 'run-immediately'
explicit_data_type = 'exon-only'
array_type = 'RNASeq'
fl = UI.ExpressionFileLocationData('', '', '', '')
graphic_links = []
graphic_links.append(['PCA', 'PCA.png'])
graphic_links.append(['HC', 'HC.png'])
graphic_links.append(['PCA1', 'PCA.png'])
graphic_links.append(['HC1', 'HC.png'])
graphic_links.append(['PCA2', 'PCA.png'])
graphic_links.append(['HC2', 'HC.png'])
graphic_links.append(['PCA3', 'PCA.png'])
graphic_links.append(['HC3', 'HC.png'])
graphic_links.append(['PCA4', 'PCA.png'])
graphic_links.append(['HC4', 'HC.png'])
summary_db = {}
summary_db['QC'] = graphic_links
#summary_db={}
fl.setGraphicLinks(graphic_links)
summary_db['gene_assayed'] = 1
summary_db['denominator_exp_genes'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_events'] = 1
summary_db['denominator_exp_events'] = 1
summary_db['alt_genes'] = 1
summary_db['direct_domain_genes'] = 1
summary_db['miRNA_gene_hits'] = 1
#summary_db={}
print_out = 'Analysis complete. AltAnalyze results\nexported to "AltResults/AlternativeOutput".'
dataset = 'test';
results_dir = ''
print "Analysis Complete\n";
if root != '' and root != None:
UI.InfoWindow(print_out, 'Analysis Completed!')
tl = Toplevel();
SummaryResultsWindow(tl, 'GE', results_dir, dataset, 'parent', summary_db)
print 'here'
#sys.exit()
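### Tee-style logger: AltAnalyze assigns an instance to sys.stdout so every printed
### message is echoed to the console and appended to the global log_file (the file
### is reopened in append mode on each write).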
class Logger(object):
def __init__(self, null):
self.terminal = sys.stdout
self.log = open(log_file, "w")
def write(self, message):
self.terminal.write(message)
self.log = open(log_file, "a")
self.log.write(message)
self.log.close()
def flush(self): pass
if __name__ == '__main__':
try:
mlp.freeze_support()
except Exception:
pass
#testResultsPanel()
skip_intro = 'yes'; #sys.exit()
#skip_intro = 'remoteViewer'
runCommandLineVersion()
if use_Tkinter == 'yes': AltAnalyzeSetup(skip_intro)
""" To do list:
0) (done) Integrate new network visualization functionality in clustering
1) RNA-Seq and LineageProfiler: threshold based RPKM expression filtering for binary absent present gene and exon calls
2) (demo) Splicing graph/isoform visualization
3) SQLite for gene-set databases prior to clustering and network visualization
4) (done) Gene-level correlation queries for clustering
5) (explored - not good) Optional algorithm type of PCA
6) (done) Optional normalization of expression data for clustering
7) (partially) Integrate splicing factor enrichment analysis (separate module?)
8) (done) Venn diagram option
9) (done) Additional Analyses: (A) combine lists, (B) annotate ID list, (C) run marker finder directly, (D) any graph from table option, (E) network from SIF, (F) inference networks from gene-lists (protein-protein, protein-DNA, protein-splicing)
10) Optional denominator option for GO-Elite (create from input and ID system IDs)
11) Update fields in summary combined alt.exon files (key by probeset)
12) Check field names for junction, exon, RNA-Seq in summary alt.exon report
13) (done) Support additional ID types for initial import (ID select option and pulldown - Other)
14) Proper FDR p-value for alt.exon analyses (include all computed p-values)
15) Add all major clustering and LineageProfiler options to UI along with stats filtering by default
16) (done) Make GO-Elite analysis the default
17) Support R check (and response that they need it) along with GUI gcrma, agilent array, hopach, combat
18) Probe-level annotations from Ensembl (partial code in place) and probe-level RMA in R (or possibly APT) - google pgf for U133 array
19) (done) Include various gene databases for LineageProfiler in download and allow for custom databases to be used (markerFinder based)
20) (done) Quantile normalization option for any non-Affy, non-RNASeq data (check box)
21) (done) Import agilent from Feature extraction files (pull-down option)
22) Update the software from the software
Advantages of this tool kit:
0) Easiest to use, hands down
1) Established and novel functionality for transcriptome/proteomics analysis built in
2) Independent and cooperative options for RNA-Seq and array analysis (splicing and gene expression)
3) Superior functional analyses (TF-target, splicing-factor target, lineage markers, WikiPathway visualization)
4) Options for different levels of users with different integration options (multiple statistical method options, option R support)
5) Built in secondary analysis options for already processed data (graphing, clustering, biomarker discovery, pathway analysis, network visualization)
6) Incorporates highly validated alternative exon identification methods, independent and jointly
Primary Engineer Work:
0) C-library calls and/or multithreading where applicable to improve performance.
1) MySQL or equivalent transition for all large database queries (e.g., HuEx 2.1 on-the-fly coordinate mapping).
2) Splicing-domain visualization (matplotlib).
3) Isoform-domain network visualization and WP overlays.
4) Webservice calls to in silico protein translation, domain prediction, splicing factor regulation.
5) Stand-alone integration with bedtools, QC tools, TopHat, Cufflinks, Miso (optional).
### 2.0.9
monocle integration
generic and cell classification machine learning
PCR primer design (gene centric after file selection)
BAM->BED (local SAMTools)
updated APT
"""
| apache-2.0 |
zhangjiajie/tax_benchmark | script/ete2/coretype/arraytable.py | 3 | 8974 | __VERSION__="ete2-2.2rev1026"
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
import sys
import re
import math
from os import path
import numpy
from ete2.parser.text_arraytable import write_arraytable, read_arraytable
__all__ = ["ArrayTable"]
class ArrayTable(object):
"""This object is thought to work with matrix datasets (like
microarrays). It allows one to load the matrix and access row
and column vectors easily. """
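# Usage sketch (illustrative, not part of the original module): assuming a
# tab-delimited matrix file that read_arraytable can parse, typical access
# looks like the following; the file name and labels are hypothetical.
#
#   table = ArrayTable("expression_matrix.txt", mtype="float")
#   row = table.get_row_vector(table.rowNames[0])     # numpy vector for one row
#   col = table.get_column_vector(table.colNames[0])  # numpy vector for one column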
def __repr__(self):
return "ArrayTable (%s)" %hex(self.__hash__())
def __str__(self):
return str(self.matrix)
def __init__(self, matrix_file=None, mtype="float"):
self.colNames = []
self.rowNames = []
self.colValues = {}
self.rowValues = {}
self.matrix = None
self.mtype = None
# If matrix file is supplied
if matrix_file is not None:
read_arraytable(matrix_file, \
mtype=mtype, \
arraytable_object = self)
def get_row_vector(self,rowname):
""" Returns the vector associated to the given row name """
return self.rowValues.get(rowname,None)
def get_column_vector(self,colname):
""" Returns the vector associated to the given column name """
return self.colValues.get(colname,None)
def get_several_column_vectors(self,colnames):
""" Returns a list of vectors associated to several column names """
vectors = [self.colValues[cname] for cname in colnames]
return numpy.array(vectors)
def get_several_row_vectors(self,rownames):
""" Returns a list vectors associated to several row names """
vectors = [self.rowValues[rname] for rname in rownames]
return numpy.array(vectors)
def remove_column(self,colname):
"""Removes the given column form the current dataset """
col_value = self.colValues.pop(colname, None)
if col_value != None:
new_indexes = range(len(self.colNames))
index = self.colNames.index(colname)
self.colNames.pop(index)
new_indexes.pop(index)
newmatrix = self.matrix.swapaxes(0,1)
newmatrix = newmatrix[new_indexes].swapaxes(0,1)
self._link_names2matrix(newmatrix)
def merge_columns(self, groups, grouping_criterion):
""" Returns a new ArrayTable object in which columns are
merged according to a given criterion.
'groups' argument must be a dictionary in which keys are the
new column names, and each value is the list of current
column names to be merged.
'grouping_criterion' must be 'min', 'max' or 'mean', and
defines how numeric values will be merged.
Example:
my_groups = {'NewColumn':['column5', 'column6']}
new_Array = Array.merge_columns(my_groups, 'max')
"""
if grouping_criterion == "max":
grouping_f = get_max_vector
elif grouping_criterion == "min":
grouping_f = get_min_vector
elif grouping_criterion == "mean":
grouping_f = get_mean_vector
else:
raise ValueError, "grouping_criterion not supported. Use max|min|mean "
grouped_array = self.__class__()
grouped_matrix = []
colNames = []
alltnames = set([])
for gname,tnames in groups.iteritems():
all_vectors=[]
for tn in tnames:
if tn not in self.colValues:
raise ValueError, str(tn)+" column not found."
if tn in alltnames:
raise ValueError, str(tn)+" duplicated column name for merging"
alltnames.add(tn)
vector = self.get_column_vector(tn).astype(float)
all_vectors.append(vector)
# Store the group vector = aggregated value (per grouping_criterion) of all items in the group
grouped_matrix.append(grouping_f(all_vectors))
# store group name
colNames.append(gname)
for cname in self.colNames:
if cname not in alltnames:
grouped_matrix.append(self.get_column_vector(cname))
colNames.append(cname)
grouped_array.rowNames= self.rowNames
grouped_array.colNames= colNames
vmatrix = numpy.array(grouped_matrix).transpose()
grouped_array._link_names2matrix(vmatrix)
return grouped_array
def transpose(self):
""" Returns a new ArrayTable in which current matrix is transposed. """
transposedA = self.__class__()
transposedM = self.matrix.transpose()
transposedA.colNames = list(self.rowNames)
transposedA.rowNames = list(self.colNames)
transposedA._link_names2matrix(transposedM)
# Check that everything is ok
# for n in self.colNames:
# print self.get_column_vector(n) == transposedA.get_row_vector(n)
# for n in self.rowNames:
# print self.get_row_vector(n) == transposedA.get_column_vector(n)
return transposedA
def _link_names2matrix(self, m):
""" Synchronize curent column and row names to the given matrix"""
if len(self.rowNames) != m.shape[0]:
raise ValueError, "Expecting matrix with %d rows" % m.shape[0]
if len(self.colNames) != m.shape[1]:
raise ValueError, "Expecting matrix with %d columns" % m.shape[1]
self.matrix = m
self.colValues.clear()
self.rowValues.clear()
# link columns names to vectors
i = 0
for colname in self.colNames:
self.colValues[colname] = self.matrix[:,i]
i+=1
# link row names to vectors
i = 0
for rowname in self.rowNames:
self.rowValues[rowname] = self.matrix[i,:]
i+=1
def write(self, fname, colnames=None):
write_arraytable(self, fname, colnames=colnames)
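# Clustering helpers: get_centroid_dist returns twice the mean distance between a
# centroid and its cluster members; get_average_centroid_linkage_dist averages the
# cross-centroid distances between two clusters using the supplied distance function.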
def get_centroid_dist(vcenter,vlist,fdist):
d = 0.0
for v in vlist:
d += fdist(v,vcenter)
return 2*(d / len(vlist))
def get_average_centroid_linkage_dist(vcenter1,vlist1,vcenter2,vlist2,fdist):
d1,d2 = 0.0, 0.0
for v in vlist1:
d1 += fdist(v,vcenter2)
for v in vlist2:
d2 += fdist(v,vcenter1)
return (d1+d2) / (len(vlist1)+len(vlist2))
def safe_mean(values):
""" Returns mean value discarding non finite values """
valid_values = []
for v in values:
if numpy.isfinite(v):
valid_values.append(v)
return numpy.mean(valid_values), numpy.std(valid_values)
def safe_mean_vector(vectors):
""" Returns mean profile discarding non finite values """
# if only one vector, avg = itself
if len(vectors)==1:
return vectors[0], numpy.zeros(len(vectors[0]))
# Takes the vector length from the first item
length = len(vectors[0])
safe_mean = []
safe_std = []
for pos in xrange(length):
pos_mean = []
for v in vectors:
if numpy.isfinite(v[pos]):
pos_mean.append(v[pos])
safe_mean.append(numpy.mean(pos_mean))
safe_std.append(numpy.std(pos_mean))
return safe_mean, safe_std
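# Illustrative example (values are hypothetical): for v1 = [1.0, nan] and
# v2 = [3.0, 4.0], safe_mean_vector([v1, v2]) returns means [2.0, 4.0] and
# standard deviations [1.0, 0.0], since non-finite entries are skipped per position.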
def get_mean_vector(vlist):
a = numpy.array(vlist)
return numpy.mean(a,0)
def get_median_vector(vlist):
a = numpy.array(vlist)
return numpy.median(a)
def get_max_vector(vlist):
a = numpy.array(vlist)
return numpy.max(a,0)
def get_min_vector(vlist):
a = numpy.array(vlist)
return numpy.min(a,0)
| gpl-3.0 |
ychab/mymoney | mymoney/apps/banktransactionanalytics/tests/test_views.py | 1 | 74698 | import datetime
import json
from decimal import Decimal
from django.test import TestCase, modify_settings, override_settings
from django.urls import reverse
from django_webtest import WebTest
from mymoney.apps.bankaccounts.factories import BankAccountFactory
from mymoney.apps.banktransactions.factories import BankTransactionFactory
from mymoney.apps.banktransactions.models import BankTransaction
from mymoney.apps.banktransactiontags.factories import (
BankTransactionTagFactory,
)
from mymoney.core.factories import UserFactory
from mymoney.core.utils.dates import GRANULARITY_MONTH, GRANULARITY_WEEK
from ..forms import RatioForm, TrendtimeForm
@modify_settings(MIDDLEWARE={
'remove': ['mymoney.core.middleware.AnonymousRedirectMiddleware'],
})
class AccessTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.not_owner = UserFactory(username='not_owner', user_permissions='staff')
cls.bankaccount = BankAccountFactory(owners=[cls.owner])
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.not_owner),
]
def test_access_ratio(self):
url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
# Anonymous denied
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non owner.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_access_ratio_summary(self):
url = reverse('banktransactionanalytics:ratiosummary', kwargs={
'bankaccount_pk': self.bankaccount.pk,
'tag_id': self.banktransactiontags[0].pk,
})
# Anonymous denied
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner without session filter.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non owner with session filter.
self.client.force_login(self.not_owner)
session = self.client.session
session['banktransactionanalyticratioform'] = {
'filters': {
'date_start': '2015-06-22',
'date_end': '2015-06-22',
'type': RatioForm.SINGLE_DEBIT,
},
}
session.save()
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner with session filter but wrong tag ID.
self.client.force_login(self.owner)
session = self.client.session
session['banktransactionanalyticratioform'] = {
'filters': {
'date_start': '2015-06-22',
'date_end': '2015-06-22',
'type': RatioForm.SINGLE_DEBIT,
},
}
session.save()
response = self.client.get(reverse(
'banktransactionanalytics:ratiosummary',
kwargs={
'bankaccount_pk': self.bankaccount.pk,
'tag_id': self.banktransactiontags[1].pk,
},
))
self.assertEqual(403, response.status_code)
# Owner with filter and no tag id.
response = self.client.get(reverse(
'banktransactionanalytics:ratiosummary',
kwargs={
'bankaccount_pk': self.bankaccount.pk,
'tag_id': 0,
},
))
self.assertEqual(200, response.status_code)
# Finally owner with session filter and good tag ID.
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_access_trendtime(self):
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': self.bankaccount.pk
})
# Anonymous denied
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non owner.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
def test_access_trendtime_summary(self):
url = reverse('banktransactionanalytics:trendtimesummary', kwargs={
'bankaccount_pk': self.bankaccount.pk,
'year': '2015',
'month': '6',
'day': '9',
})
# Anonymous denied
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Non owner.
self.client.force_login(self.not_owner)
response = self.client.get(url)
self.assertEqual(403, response.status_code)
# Owner.
self.client.force_login(self.owner)
response = self.client.get(url)
self.assertEqual(200, response.status_code)
class RatioViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.not_owner = UserFactory(username='not_owner', user_permissions='staff')
cls.bankaccount = BankAccountFactory(owners=[cls.owner])
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
]
cls.url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': cls.bankaccount.pk
})
def test_default_value(self):
form = self.app.get(self.url, user='owner').form
self.assertEqual(form['type'].value, RatioForm.SUM_DEBIT)
self.assertEqual(form['chart'].value, RatioForm.CHART_DOUGHNUT)
self.assertEqual(form['date_start'].value, '')
self.assertEqual(form['date_end'].value, '')
self.assertEqual(form['reconciled'].value, '1')
self.assertIsNone(form['tags'].value)
self.assertEqual(form['sum_min'].value, '')
self.assertEqual(form['sum_max'].value, '')
fields = {
'type': RatioForm.SINGLE_CREDIT,
'chart': RatioForm.CHART_POLAR,
'date_start': '2015-05-11',
'date_end': '2015-05-11',
'reconciled': '2',
'tags': [
str(self.banktransactiontags[0].pk),
str(self.banktransactiontags[1].pk),
],
'sum_min': '-1500',
'sum_max': '1500',
}
for name, value in fields.items():
form[name] = value
form = form.submit('filter').maybe_follow().form
self.assertEqual(fields['type'], form['type'].value)
self.assertEqual(fields['chart'], form['chart'].value)
self.assertEqual(
str(datetime.date(2015, 5, 11)),
form['date_start'].value,
)
self.assertEqual(
str(datetime.date(2015, 5, 11)),
form['date_end'].value,
)
self.assertEqual(fields['reconciled'], form['reconciled'].value)
self.assertListEqual(sorted(fields['tags']), sorted(form['tags'].value))
self.assertEqual(fields['sum_min'], form['sum_min'].value)
self.assertEqual(fields['sum_max'], form['sum_max'].value)
# Test manual reset
form['type'] = RatioForm.SUM_DEBIT
form['chart'] = RatioForm.CHART_DOUGHNUT
form['date_start'] = ''
form['date_end'] = ''
form['reconciled'] = '1'
form['tags'].force_value(None)
form['sum_min'] = ''
form['sum_max'] = ''
response = form.submit('filter').maybe_follow()
form = response.form
self.assertEqual(response.status_code, 200)
self.assertEqual(form['type'].value, RatioForm.SUM_DEBIT)
self.assertEqual(form['chart'].value, RatioForm.CHART_DOUGHNUT)
self.assertEqual(form['date_start'].value, '')
self.assertEqual(form['date_end'].value, '')
self.assertEqual(form['reconciled'].value, '1')
self.assertIsNone(form['tags'].value)
self.assertEqual(form['sum_min'].value, '')
self.assertEqual(form['sum_max'].value, '')
@override_settings(
LANGUAGE_CODE='fr-fr',
DATETIME_INPUT_FORMATS=('%d/%m/%Y',),
DECIMAL_SEPARATOR=',',
)
def test_default_value_localize(self):
form = self.app.get(self.url, user='owner').form
fields = {
'date_start': '12/05/2015',
'date_end': '12/05/2015',
'sum_min': '-300,79',
'sum_max': '-200,15',
}
for name, value in fields.items():
form[name] = value
response = form.submit('filter').maybe_follow()
form = response.form
self.assertEqual(form['date_start'].value, fields['date_start'])
self.assertEqual(form['date_end'].value, fields['date_end'])
self.assertEqual(
self.app.session['banktransactionanalyticratioform']['filters']['date_start'],
str(datetime.datetime.strptime(fields['date_start'], '%d/%m/%Y').date())
)
self.assertEqual(
self.app.session['banktransactionanalyticratioform']['filters']['date_end'],
str(datetime.datetime.strptime(fields['date_end'], '%d/%m/%Y').date())
)
self.assertEqual(fields['sum_min'], form['sum_min'].value)
self.assertEqual(fields['sum_max'], form['sum_max'].value)
self.assertEqual(
self.app.session['banktransactionanalyticratioform']['filters']['sum_min'],
fields['sum_min'].replace(',', '.'),
)
self.assertEqual(
self.app.session['banktransactionanalyticratioform']['filters']['sum_max'],
fields['sum_max'].replace(',', '.'),
)
def test_reset(self):
form = self.app.get(self.url, user='owner').form
form = form.submit('reset').maybe_follow().form
fields = {
'type': RatioForm.SINGLE_CREDIT,
'chart': RatioForm.CHART_POLAR,
'date_start': '2015-05-11',
'date_end': '2015-05-11',
'reconciled': '2',
'tags': [
str(self.banktransactiontags[0].pk),
str(self.banktransactiontags[1].pk),
],
'sum_min': '-1500',
'sum_max': '1500',
}
for name, value in fields.items():
form[name] = value
form.submit('filter').maybe_follow()
form.submit('reset').maybe_follow()
form = self.app.get(self.url, user='owner').form
self.assertEqual(form['type'].value, RatioForm.SUM_DEBIT)
self.assertEqual(form['chart'].value, RatioForm.CHART_DOUGHNUT)
self.assertFalse(form['date_start'].value)
self.assertFalse(form['date_end'].value)
self.assertEqual(form['reconciled'].value, '1')
self.assertIsNone(form['tags'].value)
self.assertFalse(form['sum_min'].value)
self.assertFalse(form['sum_max'].value)
def test_success_url(self):
form = self.app.get(self.url, user='owner').form
form['date_start'] = '2015-06-18'
form['date_end'] = '2015-06-19'
response = form.submit('filter').maybe_follow()
self.assertEqual(self.url, response.request.path)
response = response.form.submit('reset').maybe_follow()
self.assertEqual(self.url, response.request.path)
class RatioQuerysetTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
]
def test_queryset(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': bankaccount.pk
})
# Test without session filter.
response = self.app.get(url, user='owner')
self.assertNotIn('total', response.context)
self.assertNotIn('sub_total', response.context)
self.assertNotIn('rows', response.context)
self.assertNotIn('chart_data', response.context)
# Test without bank transactions.
form = self.app.get(url, user='owner').form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2012-06-10'
form['date_end'] = '2018-06-20'
response = form.submit('filter').maybe_follow()
self.assertNotIn('total', response.context)
self.assertNotIn('sub_total', response.context)
self.assertNotIn('rows', response.context)
self.assertNotIn('chart_data', response.context)
bt1 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-06-13',
reconciled=True,
tag=self.banktransactiontags[0],
)
bt2 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('165.23'),
date='2015-06-13',
reconciled=True,
tag=self.banktransactiontags[0],
)
bt3 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-06-14',
tag=self.banktransactiontags[0],
)
bt4 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-134.00'),
date='2015-06-15',
tag=self.banktransactiontags[2],
)
# No tags
bt5 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-45.88'),
date='2015-06-15',
reconciled=True,
)
# Inactive
bt6 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_INACTIVE,
tag=self.banktransactiontags[0],
)
# Another bank account
bt7 = BankTransactionFactory( # noqa
bankaccount=BankAccountFactory(),
amount=Decimal('-88.54'),
date='2015-06-14',
tag=self.banktransactiontags[0],
)
# No tags
bt8 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('865.23'),
date='2015-06-14',
)
# Ignored
bt9 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_IGNORED,
tag=self.banktransactiontags[0],
)
# Default filter.
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-10'
form['date_end'] = '2015-06-20'
response = form.submit('filter').maybe_follow()
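        # Expected total: -15.40 - 654.12 - 134.00 - 45.88 = -849.40; each row's
        # percentage below is its tag's sum over this total (e.g. -669.52 / -849.40 = 78.82%).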
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 2,
"sum": bt1.amount + bt3.amount, # Decimal("-669.52")
"percentage": Decimal("78.82"),
},
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("15.78"),
},
{
"tag_id": None,
"count": 1,
"sum": bt5.amount, # Decimal("-45.88")
"percentage": Decimal("5.40"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[78.82, 15.78, 5.4],
)
# Type filter.
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_CREDIT
form['date_start'] = '2015-06-10'
form['date_end'] = '2015-06-20'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt2.amount + bt8.amount,
)
self.assertEqual(
response.context['sub_total'],
bt2.amount + bt8.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": None,
"count": 1,
"sum": bt8.amount, # Decimal("865.23")
"percentage": Decimal("83.97"),
},
{
"tag_id": self.banktransactiontags[0].pk,
"count": 1,
"sum": bt2.amount, # Decimal("165.23")
"percentage": Decimal("16.03"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[83.97, 16.03],
)
# Date filter.
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2016-06-10'
form['date_end'] = '2016-06-20'
response = form.submit('filter').maybe_follow()
self.assertNotIn('total', response.context)
self.assertNotIn('sub_total', response.context)
self.assertNotIn('rows', response.context)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-13'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 1,
"sum": bt1.amount, # Decimal("-15.40")
"percentage": Decimal("100.00"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[100.0],
)
# Reconciled filter
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['reconciled'] = '2'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt5.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": None,
"count": 1,
"sum": bt5.amount, # Decimal("-45.88")
"percentage": Decimal("74.87"),
},
{
"tag_id": self.banktransactiontags[0].pk,
"count": 1,
"sum": bt1.amount, # Decimal("-15.40")
"percentage": Decimal("25.13"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[74.87, 25.13],
)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['reconciled'] = '3'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt3.amount + bt4.amount,
)
self.assertEqual(
response.context['sub_total'],
bt3.amount + bt4.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 1,
"sum": bt3.amount, # Decimal("-654.12")
"percentage": Decimal("83.00"),
},
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("17.00"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[83.0, 17.0],
)
# Tags filter.
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['tags'] = [self.banktransactiontags[0].pk]
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt3.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 2,
"sum": bt1.amount + bt3.amount, # Decimal("-669.52")
"percentage": Decimal("78.82"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[78.82],
)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['tags'] = [
self.banktransactiontags[0].pk,
self.banktransactiontags[2].pk,
]
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt3.amount + bt4.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 2,
"sum": bt1.amount + bt3.amount, # Decimal("-669.52")
"percentage": Decimal("78.82"),
},
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("15.78"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[78.82, 15.78],
)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['tags'] = [self.banktransactiontags[1].pk]
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(response.context['sub_total'], 0)
self.assertFalse(response.context['rows'])
self.assertNotIn('chart_data', response.context)
# Sum filter
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['sum_min'] = '-200'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt4.amount + bt5.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("15.78"),
},
{
"tag_id": None,
"count": 1,
"sum": bt5.amount, # Decimal("-45.88")
"percentage": Decimal("5.40"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[15.78, 5.40],
)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['sum_max'] = '-100'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt3.amount + bt4.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 2,
"sum": bt1.amount + bt3.amount, # Decimal("-669.52")
"percentage": Decimal("78.82"),
},
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("15.78"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[78.82, 15.78],
)
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-13'
form['date_end'] = '2015-06-18'
form['sum_min'] = '-200'
form['sum_max'] = '-100'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt3.amount + bt4.amount + bt5.amount,
)
self.assertEqual(
response.context['sub_total'],
bt4.amount,
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("15.78"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[15.78],
)
# Sum debit type
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SUM_DEBIT
form['date_start'] = '2015-06-10'
form['date_end'] = '2015-06-20'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
(bt1.amount + bt2.amount + bt3.amount) + bt4.amount,
)
self.assertEqual(
response.context['sub_total'],
(bt1.amount + bt2.amount + bt3.amount) + bt4.amount,
)
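        # With SUM_DEBIT, amounts are first summed per tag: tag 0 nets
        # -15.40 + 165.23 - 654.12 = -504.29 and tag 2 nets -134.00. Both are negative,
        # hence the 79.01% / 20.99% split of -638.29 below.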
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 3,
"sum": bt1.amount + bt2.amount + bt3.amount, # Decimal("-504.29")
"percentage": Decimal("79.01"),
},
{
"tag_id": self.banktransactiontags[2].pk,
"count": 1,
"sum": bt4.amount, # Decimal("-134.00")
"percentage": Decimal("20.99"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[79.01, 20.99],
)
# Sum credit type and reconciled
form = form.submit('reset').maybe_follow().form
form['type'] = RatioForm.SUM_CREDIT
form['date_start'] = '2015-06-10'
form['date_end'] = '2015-06-20'
form['reconciled'] = '2'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['total'],
bt1.amount + bt2.amount
)
self.assertEqual(
response.context['sub_total'],
bt1.amount + bt2.amount
)
self.assertListEqual(
[{
'tag_id': row['tag_id'],
'count': row['count'],
'sum': row['sum'],
'percentage': row['percentage']
} for row in response.context['rows']],
[
{
"tag_id": self.banktransactiontags[0].pk,
"count": 2,
"sum": bt1.amount + bt2.amount,
"percentage": Decimal("100.00"),
},
],
)
self.assertListEqual(
[row['value'] for row in json.loads(response.context['chart_data'])['data']],
[100.00],
)
class RatioListViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.bankaccount = BankAccountFactory(owners=[cls.owner])
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
]
BankTransactionFactory(
bankaccount=cls.bankaccount,
date='2015-07-05',
tag=cls.banktransactiontags[0],
)
BankTransactionFactory(
bankaccount=cls.bankaccount,
date='2015-07-05',
tag=cls.banktransactiontags[1],
)
BankTransactionFactory(
bankaccount=cls.bankaccount,
date='2015-07-05',
)
cls.url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': cls.bankaccount.pk
})
@override_settings(LANGUAGE_CODE='en-us')
def test_colors(self):
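        # Chart colors are kept in the session so that they stay stable across
        # repeated filterings, and are only dropped when the filter is reset.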
form = self.app.get(self.url, user='owner').form
form['date_start'] = '2010-01-01'
form['date_end'] = '2020-12-31'
form.submit('filter').maybe_follow()
colors = self.app.session['banktransactionanalyticratioform']['colors']
response = self.app.get(self.url, user='owner')
self.assertDictEqual(
colors,
self.app.session['banktransactionanalyticratioform']['colors'],
)
response = response.form.submit('filter').maybe_follow()
self.assertDictEqual(
colors,
self.app.session['banktransactionanalyticratioform']['colors'],
)
response = response.form.submit('reset').maybe_follow()
self.assertNotIn(
'banktransactionanalyticratioform',
self.app.session,
)
def test_reset(self):
        # On reset, validation errors (e.g. required fields) must be skipped like any other errors.

form = self.app.get(self.url, user='owner').form
form['sum_min'] = '15'
form['sum_max'] = '10'
response = form.submit('reset').maybe_follow()
self.assertFalse(response.context['form'].errors)
class RatioSummaryViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.not_owner = UserFactory(username='not_owner', user_permissions='staff')
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.not_owner),
]
@override_settings(LANGUAGE_CODE='en-us')
def test_queryset(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url_form = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': bankaccount.pk
})
url_summary = reverse('banktransactionanalytics:ratiosummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'tag_id': self.banktransactiontags[0].pk,
})
# Test without bank transactions and init filters.
form = self.app.get(url_form, user='owner').form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2015-06-01'
form['date_end'] = '2015-06-30'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertIn('banktransactions', response.context)
self.assertFalse(response.context['banktransactions'])
bt1 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-06-13',
reconciled=True,
tag=self.banktransactiontags[0],
)
bt2 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('165.23'),
date='2015-06-20',
reconciled=True,
tag=self.banktransactiontags[0],
)
# Not reconciled.
bt3 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-06-05',
tag=self.banktransactiontags[0],
)
# Too late
bt4 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-134.00'),
date='2015-12-20',
)
# No tags
bt5 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-134.00'),
date='2015-06-20',
)
# Other bank account
bt6 = BankTransactionFactory( # noqa
bankaccount=BankAccountFactory(),
amount=Decimal('-45.88'),
date='2015-06-15',
reconciled=True,
)
# Inactive
bt7 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_INACTIVE,
)
# Other tag.
bt8 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
tag=self.banktransactiontags[3],
)
# Ignored
bt9 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_IGNORED,
)
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt3), repr(bt1)],
)
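        # tag_id 0 lists the bank transactions that have no tag at all.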
response = self.app.get(reverse(
'banktransactionanalytics:ratiosummary',
kwargs={
'bankaccount_pk': bankaccount.pk,
'tag_id': 0,
},
), user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt5)],
)
response = form.submit('reset').maybe_follow()
form = response.form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2012-05-10'
form['date_end'] = '2018-07-20'
form['reconciled'] = '2'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt1)],
)
response = form.submit('reset').maybe_follow()
form = response.form
form['type'] = RatioForm.SINGLE_DEBIT
form['date_start'] = '2012-05-10'
form['date_end'] = '2018-07-20'
form['reconciled'] = '3'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt3)],
)
response = form.submit('reset').maybe_follow()
form = response.form
form['type'] = RatioForm.SUM_DEBIT
form['date_start'] = '2012-06-01'
form['date_end'] = '2018-06-30'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt3), repr(bt1), repr(bt2)],
)
@override_settings(LANGUAGE_CODE='en-us')
def test_ajax(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url_form = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': bankaccount.pk
})
url_summary = reverse('banktransactionanalytics:ratiosummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'tag_id': self.banktransactiontags[0].pk,
})
form = self.app.get(url_form, user='owner').form
form['date_start'] = '2015-06-01'
form['date_end'] = '2015-06-30'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner', xhr=False)
self.assertContains(
response,
"{bankaccount}'s ratio statistics summary".format(bankaccount=bankaccount),
)
response = self.app.get(url_summary, user='owner', xhr=True)
self.assertNotContains(
response,
"{bankaccount}'s ratio statistics summary".format(bankaccount=bankaccount),
)
class RatioTemplateTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
@override_settings(LANGUAGE_CODE='en-us')
def test_no_result(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url = reverse('banktransactionanalytics:ratio', kwargs={
'bankaccount_pk': bankaccount.pk
})
form = self.app.get(url, user='owner').form
form['date_start'] = '2015-01-01'
form['date_end'] = '2015-01-01'
response = form.submit('filter').maybe_follow()
self.assertContains(response, 'There is no result to your search.')
class TrendtimeViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.bankaccount = BankAccountFactory(owners=[cls.owner])
cls.url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': cls.bankaccount.pk
})
@override_settings(
LANGUAGE_CODE='en-us',
DATETIME_INPUT_FORMATS=('%m/%d/%Y',),
)
def test_default_value(self):
session_key = 'banktransactionanalytictrendtimeform'
form = self.app.get(self.url, user='owner').form
self.assertEqual(form['chart'].value, TrendtimeForm.CHART_LINE)
self.assertEqual(form['granularity'].value, GRANULARITY_MONTH)
        # Python built-ins (datetime.date.today) are not mocked here, so compare with today's date.
self.assertEqual(form['date'].value, datetime.date.today().strftime('%m/%d/%Y'))
self.assertEqual(form['reconciled'].value, '1')
fields = {
'chart': TrendtimeForm.CHART_BAR,
'granularity': GRANULARITY_WEEK,
'date': '2015-06-18',
'reconciled': '2',
}
for name, value in fields.items():
form[name] = value
response = form.submit('filter').maybe_follow()
form = response.form
self.assertEqual(fields['chart'], form['chart'].value)
self.assertEqual(fields['granularity'], form['granularity'].value)
self.assertEqual(
str(datetime.date(2015, 6, 18)),
form['date'].value,
)
self.assertDictEqual(
self.app.session[session_key]['filters']['date_kwargs'],
{
'year': 2015,
'month': 6,
'day': 18,
},
)
self.assertEqual(fields['reconciled'], form['reconciled'].value)
# Test manual reset
form['chart'] = TrendtimeForm.CHART_LINE
form['granularity'] = GRANULARITY_MONTH
form['reconciled'] = '1'
response = form.submit('filter').maybe_follow()
self.assertEqual(response.status_code, 200)
form = response.form
self.assertEqual(form['chart'].value, TrendtimeForm.CHART_LINE)
self.assertEqual(form['granularity'].value, GRANULARITY_MONTH)
self.assertEqual(form['reconciled'].value, '1')
@override_settings(
LANGUAGE_CODE='fr-fr',
DATETIME_INPUT_FORMATS=('%d/%m/%Y',),
DECIMAL_SEPARATOR=',',
)
def test_default_value_localize(self):
session_key = 'banktransactionanalytictrendtimeform'
form = self.app.get(self.url, user='owner').form
self.assertEqual(form['date'].value, datetime.date.today().strftime('%d/%m/%Y'))
form['date'] = '18/06/2015'
response = form.submit('filter').maybe_follow()
form = response.form
self.assertEqual(form['date'].value, '18/06/2015')
self.assertDictEqual(
self.app.session[session_key]['filters']['date_kwargs'],
{
'year': 2015,
'month': 6,
'day': 18,
},
)
@override_settings(LANGUAGE_CODE='en-us')
def test_reset(self):
form = self.app.get(self.url, user='owner').form
form = form.submit('reset').maybe_follow().form
fields = {
'chart': TrendtimeForm.CHART_BAR,
'granularity': GRANULARITY_WEEK,
'date': '2015-06-18',
'reconciled': '2',
}
for name, value in fields.items():
form[name] = value
form.submit('filter').maybe_follow()
form.submit('reset').maybe_follow()
form = self.app.get(self.url, user='owner').form
self.assertEqual(form['chart'].value, TrendtimeForm.CHART_LINE)
self.assertEqual(form['granularity'].value, GRANULARITY_MONTH)
self.assertEqual(form['date'].value, datetime.date.today().strftime('%m/%d/%Y'))
self.assertEqual(form['reconciled'].value, '1')
@override_settings(LANGUAGE_CODE='en-us')
def test_success_url(self):
form = self.app.get(self.url, user='owner').form
form['date'] = '2015-06-18'
response = form.submit('filter').maybe_follow()
self.assertEqual(self.url, response.request.path)
response = response.form.submit('reset').maybe_follow()
self.assertEqual(self.url, response.request.path)
@override_settings(LANGUAGE_CODE='en-us')
def test_queryset_balance(self):
bankaccount = BankAccountFactory(
balance_initial=Decimal('150'),
owners=[self.owner],
)
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
# No filter.
response = self.app.get(url, user='owner')
self.assertNotIn('balance_initial', response.context)
        # No bank transactions exist yet.
form = response.form
form['date'] = '2015-06-05'
response = form.submit('filter').maybe_follow()
self.assertNotIn('balance_initial', response.context)
bt1 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-05-13',
reconciled=True,
)
form = response.form
form['date'] = '2015-05-20'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['balance_initial'],
bankaccount.balance_initial,
)
bt2 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('165.23'),
date='2015-05-13',
reconciled=True,
)
# Not reconciled
bt3 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-05-14',
)
# Another bank account
bt4 = BankTransactionFactory( # noqa
bankaccount=BankAccountFactory(),
amount=Decimal('-654.12'),
date='2015-05-14',
reconciled=True,
)
# Current, not previous
bt5 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-06-14',
)
bt6 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-06-14',
reconciled=True,
)
# Inactive
bt7 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-05-14',
status=BankTransaction.STATUS_INACTIVE,
)
# Ignored
bt8 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-05-14',
status=BankTransaction.STATUS_IGNORED,
)
form = response.form
form['date'] = '2015-06-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['balance_initial'],
bankaccount.balance_initial + bt1.amount + bt2.amount + bt3.amount,
)
form = response.form
form['date'] = '2015-06-05'
form['granularity'] = GRANULARITY_MONTH
form['reconciled'] = '2'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['balance_initial'],
bankaccount.balance_initial + bt1.amount + bt2.amount,
)
form = response.form
form['date'] = '2015-06-05'
form['granularity'] = GRANULARITY_MONTH
form['reconciled'] = '3'
response = form.submit('filter').maybe_follow()
self.assertEqual(
response.context['balance_initial'],
bankaccount.balance_initial + bt3.amount,
)
@override_settings(LANGUAGE_CODE='en-us')
def test_queryset_items(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
response = self.app.get(url, user='owner')
form = response.form
form['date'] = '2015-06-20'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertNotIn('chart_data', response.context)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-06-02',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-45.40'),
date='2015-06-03',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('150.23'),
date='2015-06-03',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-10.79'),
date='2015-06-05',
reconciled=True,
)
# Not reconciled
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-69.00'),
date='2015-06-05',
)
# Other bank account.
BankTransactionFactory(
bankaccount=BankAccountFactory(),
amount=Decimal('-39.90'),
date='2015-06-05',
reconciled=True,
)
# Inactive
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-19.90'),
date='2015-06-05',
reconciled=True,
status=BankTransaction.STATUS_INACTIVE,
)
# Ignored
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-19.90'),
date='2015-06-05',
reconciled=True,
status=BankTransaction.STATUS_IGNORED,
)
form = response.form
form['date'] = '2015-06-15'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
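        # Daily running balance: -15.40 (06-02); -15.40 - 45.40 + 150.23 = 89.43 (06-03);
        # unchanged (06-04); 89.43 - 10.79 - 69.00 = 9.64 (06-05). Delta is the daily move
        # and percentage relates it to the previous balance (e.g. 104.83 / -15.40 = -680.71%).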
self.assertListEqual(
response.context['rows'],
[
{
'date': datetime.date(2015, 6, 2),
'count': 1,
'balance': Decimal('-15.40'),
'delta': Decimal('-15.40'),
'percentage': 0,
},
{
'date': datetime.date(2015, 6, 3),
'count': 2,
'balance': Decimal('89.43'),
'delta': Decimal('104.83'),
'percentage': Decimal('-680.71'),
},
{
'date': datetime.date(2015, 6, 4),
'count': 0,
'balance': Decimal('89.43'),
'delta': 0,
'percentage': 0,
},
{
'date': datetime.date(2015, 6, 5),
'count': 2,
'balance': Decimal('9.64'),
'delta': Decimal('-79.79'),
'percentage': Decimal('-89.22'),
},
],
)
self.assertListEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'],
[-15.40, 89.43, 89.43, 9.64],
)
# Reconciled
form = response.form
form['date'] = '2015-06-15'
form['reconciled'] = '2'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertListEqual(
response.context['rows'],
[
{
'date': datetime.date(2015, 6, 2),
'count': 1,
'balance': Decimal('-15.40'),
'delta': Decimal('-15.40'),
'percentage': 0,
},
{
'date': datetime.date(2015, 6, 3),
'count': 2,
'balance': Decimal('89.43'),
'delta': Decimal('104.83'),
'percentage': Decimal('-680.71'),
},
{
'date': datetime.date(2015, 6, 4),
'count': 0,
'balance': Decimal('89.43'),
'delta': 0,
'percentage': 0,
},
{
'date': datetime.date(2015, 6, 5),
'count': 1,
'balance': Decimal('78.64'),
'delta': Decimal('-10.79'),
'percentage': Decimal('-12.07'),
},
],
)
self.assertListEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'],
[-15.40, 89.43, 89.43, 78.64],
)
# Not reconciled
form = response.form
form['date'] = '2015-06-15'
form['reconciled'] = '3'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertListEqual(
response.context['rows'],
[
{
'date': datetime.date(2015, 6, 5),
'count': 1,
'balance': Decimal('-69.00'),
'delta': Decimal('-69.00'),
'percentage': 0,
},
],
)
self.assertListEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'],
[-69.00],
)
response = form.submit('reset').maybe_follow()
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.79'),
date='2015-05-30',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('236.78'),
date='2015-07-02',
reconciled=True,
)
form = response.form
form['date'] = '2015-06-15'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertDictEqual(
response.context['rows'][0],
{
'date': datetime.date(2015, 6, 1),
'count': 0,
'balance': Decimal('-15.79'),
'delta': 0,
'percentage': 0,
},
)
self.assertDictEqual(
response.context['rows'][5],
{
'date': datetime.date(2015, 6, 6),
'count': 0,
'balance': Decimal('-6.15'),
'delta': 0,
'percentage': 0,
},
)
self.assertDictEqual(
response.context['rows'][6],
{
'date': datetime.date(2015, 6, 7),
'count': 0,
'balance': Decimal('-6.15'),
'delta': 0,
'percentage': 0,
},
)
self.assertEqual(len(response.context['rows']), 30)
self.assertEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'][0],
-15.79,
)
self.assertEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'][5],
-6.15,
)
self.assertEqual(
json.loads(response.context['chart_data'])['data']['datasets'][0]['data'][6],
-6.15,
)
@override_settings(LANGUAGE_CODE='en-us')
def test_queryset_date_range(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
# No filter.
response = self.app.get(url, user='owner')
        # No bank transactions exist yet.
form = response.form
form['date'] = '2015-02-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertContains(
response,
'There is no result corresponding to your search.',
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-06-13',
reconciled=True,
)
        # None exist before this date.
form = response.form
form['date'] = '2015-05-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertContains(
response,
'There is no result corresponding to your search.',
)
        # Results exist for the current month.
form = response.form
form['date'] = '2015-06-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertIn('rows', response.context)
self.assertNotContains(
response,
'There is no result corresponding to your search.',
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-04-13',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-03-13',
)
BankTransactionFactory(
bankaccount=BankAccountFactory(),
amount=Decimal('-15.40'),
date='2015-02-13',
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-02-13',
status=BankTransaction.STATUS_INACTIVE,
reconciled=True,
)
BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-02-13',
status=BankTransaction.STATUS_IGNORED,
reconciled=True,
)
        # No eligible transactions exist for this month or before.
form = response.form
form['date'] = '2015-02-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertContains(
response,
'There is no result corresponding to your search.',
)
        # One transaction exists within the same granularity period (month).
form = response.form
form['date'] = '2015-03-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertIn('rows', response.context)
self.assertNotContains(
response,
'There is no result corresponding to your search.',
)
        # None reconciled for this period.
form = response.form
form['date'] = '2015-03-25'
form['granularity'] = GRANULARITY_MONTH
form['reconciled'] = '2'
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertContains(
response,
'There is no result corresponding to your search.',
)
# Got one (not reconciled)
form = response.form
form['date'] = '2015-03-05'
form['granularity'] = GRANULARITY_MONTH
form['reconciled'] = '3'
response = form.submit('filter').maybe_follow()
self.assertIn('rows', response.context)
self.assertNotContains(
response,
'There is no result corresponding to your search.',
)
        # Nothing available after this period.
form = response.form.submit('reset').maybe_follow().form
form['date'] = '2015-07-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertNotIn('rows', response.context)
self.assertContains(
response,
'There is no result corresponding to your search.',
)
        # None exist in this month itself, but transactions exist both before AND after it.
form = response.form.submit('reset').maybe_follow().form
form['date'] = '2015-05-05'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertIn('rows', response.context)
self.assertNotContains(
response,
'There is no result corresponding to your search.',
)
def test_monthly_paginator(self):
bankaccount = BankAccountFactory(balance=0, owners=[self.owner])
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
BankTransactionFactory(bankaccount=bankaccount, date='2015-06-15')
BankTransactionFactory(bankaccount=bankaccount, date='2015-06-25')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-15')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-15')
BankTransactionFactory(bankaccount=bankaccount, date='2015-08-15')
response = self.app.get(url, user='owner')
form = response.form
form['date'] = '2015-06-02'
form['granularity'] = GRANULARITY_MONTH
response = form.submit('filter').maybe_follow()
self.assertEqual(len(response.context[0]['rows']), 16)
self.assertEqual(
response.context[0]['page_obj'].paginator.date_min,
datetime.date(2015, 6, 1),
)
self.assertEqual(
response.context[0]['page_obj'].paginator.date_max,
datetime.date(2015, 8, 31),
)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 6, 2),
)
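        # response.click() treats the href argument as a regular expression, hence the
        # escaped '?' in the pager URLs below; a href with no match raises IndexError.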
pager_prev_url = url + '?date=2015-05-01'
with self.assertRaises(IndexError):
response.click(href=pager_prev_url)
pager_next_url = '\?date=2015-07-01'
response = response.click(href=pager_next_url)
self.assertEqual(len(response.context[0]['rows']), 31)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 7, 1),
)
self.assertEqual(
response.form['date'].value,
'2015-06-02',
)
pager_next_url = '\?date=2015-08-01'
response = response.click(href=pager_next_url)
self.assertEqual(len(response.context[0]['rows']), 15)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 8, 1),
)
pager_next_url = '\?date=2015-09-01'
with self.assertRaises(IndexError):
response.click(href=pager_next_url)
# Try to insert a fake date manually. Date filter should be used
# instead.
response = self.app.get(url + '?date=foo', user='owner')
self.assertEqual(len(response.context[0]['rows']), 16)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 6, 2),
)
@override_settings(LANGUAGE_CODE='en-us')
def test_weekly_paginator(self):
bankaccount = BankAccountFactory(balance=0, owners=[self.owner])
url = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-14')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-16')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-21')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-21')
BankTransactionFactory(bankaccount=bankaccount, date='2015-07-29')
response = self.app.get(url, user='owner')
form = response.form
form['date'] = '2015-07-13'
form['granularity'] = GRANULARITY_WEEK
response = form.submit('filter').maybe_follow()
self.assertEqual(len(response.context[0]['rows']), 5)
self.assertEqual(
response.context[0]['page_obj'].paginator.date_min,
datetime.date(2015, 7, 12),
)
self.assertEqual(
response.context[0]['page_obj'].paginator.date_max,
datetime.date(2015, 8, 1),
)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 7, 13),
)
pager_prev_url = url + '?date=2015-07-05'
with self.assertRaises(IndexError):
response.click(href=pager_prev_url)
pager_next_url = '\?date=2015-07-19'
response = response.click(href=pager_next_url)
self.assertEqual(len(response.context[0]['rows']), 7)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 7, 19),
)
self.assertEqual(
response.form['date'].value,
'2015-07-13',
)
pager_next_url = '\?date=2015-07-26'
response = response.click(href=pager_next_url)
self.assertEqual(len(response.context[0]['rows']), 4)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 7, 26),
)
pager_next_url = '\?date=2015-08-02'
with self.assertRaises(IndexError):
response.click(href=pager_next_url)
# Try to insert a fake date manually. Date filter should be used
# instead.
response = self.app.get(url + '?date=foo', user='owner')
self.assertEqual(len(response.context[0]['rows']), 5)
self.assertEqual(
response.context[0]['page_obj'].date,
datetime.date(2015, 7, 13),
)
class TrendtimeSummaryViewTestCase(WebTest):
@classmethod
def setUpTestData(cls):
cls.owner = UserFactory(username='owner')
cls.banktransactiontags = [
BankTransactionTagFactory(owner=cls.owner),
BankTransactionTagFactory(owner=cls.owner),
]
@override_settings(LANGUAGE_CODE='en-us')
def test_queryset(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url_form = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
url_summary = reverse('banktransactionanalytics:trendtimesummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'year': '2015',
'month': '6',
'day': '15',
})
# Test without bank transactions and init filters.
form = self.app.get(url_form, user='owner').form
form['date'] = '2015-06-01'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertIn('banktransactions', response.context)
self.assertFalse(response.context['banktransactions'])
# Test with dummy date.
response = self.app.get(
reverse('banktransactionanalytics:trendtimesummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'year': '9999',
'month': '99',
'day': '99',
}),
user='owner',
)
self.assertIn('banktransactions', response.context)
self.assertFalse(response.context['banktransactions'])
# Test with invalid date.
response = self.app.get(
reverse('banktransactionanalytics:trendtimesummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'year': '2015',
'month': '2',
'day': '31',
}),
user='owner',
)
self.assertIn('banktransactions', response.context)
self.assertFalse(response.context['banktransactions'])
bt1 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-15.40'),
date='2015-06-15',
reconciled=True,
tag=self.banktransactiontags[0],
)
bt2 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('165.23'),
date='2015-06-15',
reconciled=True,
tag=self.banktransactiontags[0],
)
# Not reconciled.
bt3 = BankTransactionFactory(
bankaccount=bankaccount,
amount=Decimal('-654.12'),
date='2015-06-15',
tag=self.banktransactiontags[1],
)
# Too late
bt4 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-134.00'),
date='2015-12-16',
reconciled=False,
)
# Other bank account
bt5 = BankTransactionFactory( # noqa
bankaccount=BankAccountFactory(),
amount=Decimal('-45.88'),
date='2015-06-15',
reconciled=True,
)
# Inactive
bt6 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_INACTIVE,
)
# Ignored
bt7 = BankTransactionFactory( # noqa
bankaccount=bankaccount,
amount=Decimal('-88.54'),
date='2015-06-15',
reconciled=True,
status=BankTransaction.STATUS_IGNORED,
)
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt1), repr(bt2), repr(bt3)],
)
response = form.submit('reset').maybe_follow()
form = response.form
form['date'] = '2012-06-15'
form['reconciled'] = '2'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt1), repr(bt2)],
)
response = form.submit('reset').maybe_follow()
form = response.form
form['date'] = '2012-06-15'
form['reconciled'] = '3'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner')
self.assertQuerysetEqual(
response.context['banktransactions'],
[repr(bt3)],
)
@override_settings(LANGUAGE_CODE='en-us')
def test_ajax(self):
bankaccount = BankAccountFactory(owners=[self.owner])
url_form = reverse('banktransactionanalytics:trendtime', kwargs={
'bankaccount_pk': bankaccount.pk
})
url_summary = reverse('banktransactionanalytics:trendtimesummary', kwargs={
'bankaccount_pk': bankaccount.pk,
'year': '2015',
'month': '6',
'day': '15',
})
form = self.app.get(url_form, user='owner').form
form['date'] = '2015-06-01'
form.submit('filter').maybe_follow()
response = self.app.get(url_summary, user='owner', xhr=False)
self.assertContains(
response,
"{bankaccount}'s trendtime statistics summary".format(bankaccount=bankaccount),
)
response = self.app.get(url_summary, user='owner', xhr=True)
self.assertNotContains(
response,
"{bankaccount}'s trendtime statistics summary".format(bankaccount=bankaccount),
)
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/tests/test_multiclass.py | 3 | 12251 | import numpy as np
import warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge,
Perceptron)
from sklearn.tree import DecisionTreeClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn import datasets
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
# FIXME: - should use sets
# - should move to metrics module
def multilabel_precision(Y_true, Y_pred):
n_predictions = 0
n_correct = 0
for i in range(len(Y_true)):
n_predictions += len(Y_pred[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_predictions
def multilabel_recall(Y_true, Y_pred):
n_labels = 0
n_correct = 0
for i in range(len(Y_true)):
n_labels += len(Y_true[i])
for label in Y_pred[i]:
if label in Y_true[i]:
n_correct += 1
return float(n_correct) / n_labels
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
clf = LinearSVC(random_state=0)
pred2 = clf.fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_greater(np.mean(iris.target == pred), 0.65)
def test_ovr_always_present():
# Test that ovr works with classes that are always present or absent
X = np.ones((10, 2))
X[:5, :] = 0
y = [[int(i >= 5), 2, 3] for i in range(10)]
with warnings.catch_warnings(record=True):
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(X, y)
y_pred = ovr.predict(X)
assert_array_equal(np.array(y_pred), np.array(y))
def test_ovr_multilabel():
# Toy dataset where features correspond directly to labels.
X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]])
y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"],
["ham", "eggs"], ["ham"]]
#y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]]
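    # Indicator columns correspond to (ham, spam, eggs), mirroring the feature columns of X.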
Y = np.array([[0, 1, 1],
[0, 1, 0],
[1, 1, 1],
[1, 0, 1],
[1, 0, 0]])
classes = set("ham eggs spam".split())
for base_clf in (MultinomialNB(), LinearSVC(random_state=0),
LinearRegression(), Ridge(),
ElasticNet(), Lasso(alpha=0.5)):
# test input as lists of tuples
clf = OneVsRestClassifier(base_clf).fit(X, y)
assert_equal(set(clf.classes_), classes)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_equal(set(y_pred), set(["spam", "eggs"]))
assert_true(clf.multilabel_)
# test input as label indicator matrix
clf = OneVsRestClassifier(base_clf).fit(X, Y)
y_pred = clf.predict([[0, 4, 4]])[0]
assert_array_equal(y_pred, [0, 1, 1])
assert_true(clf.multilabel_)
def test_ovr_fit_predict_svc():
ovr = OneVsRestClassifier(svm.SVC())
ovr.fit(iris.data, iris.target)
assert_equal(len(ovr.estimators_), 3)
assert_greater(ovr.score(iris.data, iris.target), .9)
def test_ovr_multilabel_dataset():
base_clf = MultinomialNB(alpha=1)
for au, prec, recall in zip((True, False), (0.65, 0.74), (0.72, 0.84)):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=2,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
assert_true(clf.multilabel_)
assert_almost_equal(multilabel_precision(Y_test, Y_pred), prec,
decimal=2)
assert_almost_equal(multilabel_recall(Y_test, Y_pred), recall,
decimal=2)
def test_ovr_multilabel_predict_proba():
base_clf = MultinomialNB(alpha=1)
for au in (False, True):
X, Y = datasets.make_multilabel_classification(n_samples=100,
n_features=20,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=au,
random_state=0)
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
# predict assigns a label if the probability that the
# sample has the label is greater than 0.5.
pred = [tuple(l.nonzero()[0]) for l in (Y_proba > 0.5)]
assert_equal(pred, Y_pred)
def test_ovr_single_label_predict_proba():
base_clf = MultinomialNB(alpha=1)
X, Y = iris.data, iris.target
X_train, Y_train = X[:80], Y[:80]
X_test, Y_test = X[80:], Y[80:]
clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train)
# decision function only estimator. Fails in current implementation.
decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train)
assert_raises(AttributeError, decision_only.predict_proba, X_test)
Y_pred = clf.predict(X_test)
Y_proba = clf.predict_proba(X_test)
assert_almost_equal(Y_proba.sum(axis=1), 1.0)
    # In the single-label case, predict returns the class with the highest
    # probability, so it must match the argmax of predict_proba.
pred = np.array([l.argmax() for l in Y_proba])
assert_false((pred - Y_pred).any())
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovr_pipeline():
# Test with pipeline of length one
# This test is needed because the multiclass estimators may fail to detect
# the presence of predict_proba or decision_function.
clf = Pipeline([("tree", DecisionTreeClassifier())])
ovr_pipe = OneVsRestClassifier(clf)
ovr_pipe.fit(iris.data, iris.target)
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data))
def test_ovr_coef_():
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
ovr.fit(iris.data, iris.target)
shape = ovr.coef_.shape
assert_equal(shape[0], n_classes)
assert_equal(shape[1], iris.data.shape[1])
def test_ovr_coef_exceptions():
# Not fitted exception!
ovr = OneVsRestClassifier(LinearSVC(random_state=0))
# lambda is needed because we don't want coef_ to be evaluated right away
assert_raises(ValueError, lambda x: ovr.coef_, None)
# Doesn't have coef_ exception!
ovr = OneVsRestClassifier(DecisionTreeClassifier())
ovr.fit(iris.data, iris.target)
assert_raises(AttributeError, lambda x: ovr.coef_, None)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC(random_state=0))
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_ties():
# test that ties are broken using the decision function, not defaulting to
# the smallest label
X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]])
y = np.array([2, 0, 1, 2])
multi_clf = OneVsOneClassifier(Perceptron())
ovo_prediction = multi_clf.fit(X, y).predict(X)
# recalculate votes to make sure we have a tie
predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_])
scores = np.vstack([clf.decision_function(X)
for clf in multi_clf.estimators_])
# classifiers are in order 0-1, 0-2, 1-2
# aggregate votes:
votes = np.zeros((4, 3))
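    # Each pairwise classifier predicts 0 or 1, i.e. the first or second class of its pair;
    # the lines below map that back to the global label: 0-1 -> pred, 0-2 -> 2*pred, 1-2 -> 1+pred.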
votes[np.arange(4), predictions[0]] += 1
votes[np.arange(4), 2 * predictions[1]] += 1
votes[np.arange(4), 1 + predictions[2]] += 1
# for the first point, there is one vote per class
assert_array_equal(votes[0, :], 1)
# for the rest, there is no tie and the prediction is the argmax
assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:])
# for the tie, the prediction is the class with the highest score
assert_equal(ovo_prediction[0], 1)
# score for one is greater than score for zero
assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0])
# score for one is greater than score for two
assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0])
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0))
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0)
ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC(random_state=0),
random_state=0)
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator_.estimators_[0].C
assert_true(best_C in Cs)
| bsd-3-clause |
LevinJ/Supply-demand-forecasting | implement/decisiontreemodel.py | 1 | 1078 | import sys
import os
sys.path.insert(0, os.path.abspath('..'))
from utility.sklearnbasemodel import BaseModel
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from utility.datafilepath import g_singletonDataFilePath
from preprocess.splittrainvalidation import HoldoutSplitMethod
import matplotlib.pyplot as plt
class DecisionTreeModel(BaseModel):
def __init__(self):
BaseModel.__init__(self)
self.save_final_model = False
self.do_cross_val = False
return
def setClf(self):
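        # min_samples_split is the minimum number of samples required to split an
        # internal node; raising it regularizes the tree against overfitting.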
min_samples_split = 10
self.clf = DecisionTreeRegressor(random_state=0, min_samples_split= min_samples_split)
return
def after_test(self):
plt.show()
return
def getTunedParamterOptions(self):
# tuned_parameters = [{'min_samples_split': np.arange(2, 10, 1)}]
# tuned_parameters = [{'min_samples_split': [5, 8,10,12]}]
tuned_parameters = [{'min_samples_split': [10]}]
return tuned_parameters
if __name__ == "__main__":
obj= DecisionTreeModel()
obj.run() | mit |
kundan2510/pixelCNN | cifar10_gen.py | 1 | 3795 | from keras.datasets import cifar10
import numpy
from generic_utils import *
from models import Model
from layers import WrapperLayer, pixelConv, Softmax
import theano
import theano.tensor as T
import lasagne
import random
from plot_images import plot_25_figure
DIM = 32
GRAD_CLIP = 1.
Q_LEVELS = 256
BATCH_SIZE = 20
PRINT_EVERY = 250
EPOCH = 100
OUT_DIR = '/Tmp/kumarkun/cifar10'
create_folder_if_not_there(OUT_DIR)
model = Model(name = "CIFAR10.pixelCNN")
is_train = T.scalar()
X = T.tensor4('X') # shape: (batchsize, channels, height, width)
X_r = T.itensor4('X_r')
X_transformed = X_r.dimshuffle(0,2,3,1)
input_layer = WrapperLayer(X.dimshuffle(0,2,3,1)) # input reshaped to (batchsize, height, width,3)
pixel_CNN = pixelConv(
input_layer,
3,
DIM,
Q_LEVELS = Q_LEVELS,
name = model.name + ".pxCNN",
num_layers = 12,
)
model.add_layer(pixel_CNN)
output_probab = Softmax(pixel_CNN).output()
cost = T.nnet.categorical_crossentropy(
output_probab.reshape((-1,output_probab.shape[output_probab.ndim - 1])),
X_r.flatten()
).mean()
# in nats
output_image = sample_from_softmax(output_probab)
model.print_params()
params = model.get_params()
grads = T.grad(cost, wrt=params, disconnected_inputs='warn')
grads = [T.clip(g, floatX(-GRAD_CLIP), floatX(GRAD_CLIP)) for g in grads]
# learning_rate = T.scalar('learning_rate')
updates = lasagne.updates.adam(grads, params, learning_rate = 1e-3)
train_fn = theano.function([X, X_r], cost, updates = updates)
valid_fn = theano.function([X, X_r], cost)
generate_routine = theano.function([X], output_image)
def generate_fn(generate_routine, HEIGHT, WIDTH, num):
X = floatX(numpy.zeros((num, 3, HEIGHT, WIDTH)))
out = numpy.zeros((num,HEIGHT, WIDTH, 3))
for i in range(HEIGHT):
for j in range(WIDTH):
samples = generate_routine(X)
out[:,i,j] = samples[:,i,j]
X[:,:,i,j] = downscale_images(samples[:,i,j,:], Q_LEVELS - 1)
return out
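# Note: generation is autoregressive -- for every pixel (i, j) a full forward
# pass is run, only the sample at (i, j) is kept, and the input X is updated
# with that value (rescaled via downscale_images) before the next pixel.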
(X_train_r, _), (X_test_r, _) = cifar10.load_data()
X_train_r = upscale_images(downscale_images(X_train_r, 256), Q_LEVELS)
X_test_r = upscale_images(downscale_images(X_test_r, 256), Q_LEVELS)
X_train = downscale_images(X_train_r, Q_LEVELS - 1)
X_test = downscale_images(X_test_r, Q_LEVELS - 1)
errors = {'training' : [], 'validation' : []}
num_iters = 0
# init_learning_rate = floatX(0.001)
print "Training"
for i in range(EPOCH):
"""Training"""
costs = []
num_batch_train = len(X_train)//BATCH_SIZE
for j in range(num_batch_train):
cost = train_fn(
X_train[j*BATCH_SIZE: (j+1)*BATCH_SIZE],
X_train_r[j*BATCH_SIZE: (j+1)*BATCH_SIZE]
)
costs.append(cost)
num_iters += 1
if (j+1) % PRINT_EVERY == 0:
print ("Training: epoch {}, iter {}, cost {}".format(i,j+1,numpy.mean(costs)))
print("Training cost for epoch {}: {}".format(i+1, numpy.mean(costs)))
errors['training'].append(numpy.mean(costs))
costs = []
num_batch_valid = len(X_test)//BATCH_SIZE
for j in range(num_batch_valid):
cost = valid_fn(
X_test[j*BATCH_SIZE: (j+1)*BATCH_SIZE],
X_test_r[j*BATCH_SIZE: (j+1)*BATCH_SIZE]
)
costs.append(cost)
if (j+1) % PRINT_EVERY == 0:
print ("Validation: epoch {}, iter {}, cost {}".format(i,j+1,numpy.mean(costs)))
model.save_params('{}/epoch_{}_val_error_{}.pkl'.format(OUT_DIR,i, numpy.mean(costs)))
X = generate_fn(generate_routine, 32, 32, 25)
reconstruction = generate_routine(X_test[:25])
plot_25_figure(X, '{}/epoch_{}_val_error_{}_gen_images.jpg'.format(OUT_DIR, i, numpy.mean(costs)), num_channels = 3)
plot_25_figure(reconstruction, '{}/epoch_{}_reconstructed.jpg'.format(OUT_DIR, i), num_channels = 3)
print("Validation cost after epoch {}: {}".format(i+1, numpy.mean(costs)))
errors['validation'].append(numpy.mean(costs))
if i % 2 == 0:
save(errors, '{}/epoch_{}_NLL.pkl'.format(OUT_DIR, i))
| mit |
SeanCameronConklin/aima-python | learning.py | 22 | 35993 | """Learn to estimate functions from examples. (Chapters 18-20)"""
from utils import (
removeall, unique, product, argmax, argmax_random_tie, isclose,
dotproduct, vector_add, scalar_vector_product, weighted_sample_with_replacement,
weighted_sampler, num_or_str, normalize, clip, sigmoid, print_table, DataFile
)
import copy
import heapq
import math
import random
# XXX statistics.mode is not quite the same as the old utils.mode:
# it insists on there being a unique most-frequent value. Code using mode
# needs to be revisited, or we need to restore utils.mode.
from statistics import mean, mode
from collections import defaultdict
# ______________________________________________________________________________
def rms_error(predictions, targets):
return math.sqrt(ms_error(predictions, targets))
def ms_error(predictions, targets):
return mean([(p - t)**2 for p, t in zip(predictions, targets)])
def mean_error(predictions, targets):
return mean([abs(p - t) for p, t in zip(predictions, targets)])
def manhattan_distance(predictions, targets):
return sum([abs(p - t) for p, t in zip(predictions, targets)])
def mean_boolean_error(predictions, targets):
return mean([(p != t) for p, t in zip(predictions, targets)])
# ______________________________________________________________________________
class DataSet:
"""A data set for a machine learning problem. It has the following fields:
d.examples A list of examples. Each one is a list of attribute values.
d.attrs A list of integers to index into an example, so example[attr]
gives a value. Normally the same as range(len(d.examples[0])).
d.attrnames Optional list of mnemonic names for corresponding attrs.
d.target The attribute that a learning algorithm will try to predict.
By default the final attribute.
d.inputs The list of attrs without the target.
d.values A list of lists: each sublist is the set of possible
values for the corresponding attribute. If initially None,
it is computed from the known examples by self.setproblem.
If not None, an erroneous value raises ValueError.
d.distance A function from a pair of examples to a nonnegative number.
Should be symmetric, etc. Defaults to mean_boolean_error
since that can handle any field types.
d.name Name of the data set (for output display only).
d.source URL or other source where the data came from.
Normally, you call the constructor and you're done; then you just
access fields like d.examples and d.target and d.inputs."""
def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
inputs=None, values=None, distance=mean_boolean_error,
name='', source='', exclude=()):
"""Accepts any of DataSet's fields. Examples can also be a
string or file from which to parse examples using parse_csv.
Optional parameter: exclude, as documented in .setproblem().
>>> DataSet(examples='1, 2, 3')
<DataSet(): 1 examples, 3 attributes>
"""
self.name = name
self.source = source
self.values = values
self.distance = distance
if values is None:
self.got_values_flag = False
else:
self.got_values_flag = True
# Initialize .examples from string or list or data directory
if isinstance(examples, str):
self.examples = parse_csv(examples)
elif examples is None:
self.examples = parse_csv(DataFile(name + '.csv').read())
else:
self.examples = examples
# Attrs are the indices of examples, unless otherwise stated.
if attrs is None and self.examples is not None:
attrs = list(range(len(self.examples[0])))
self.attrs = attrs
# Initialize .attrnames from string, list, or by default
if isinstance(attrnames, str):
self.attrnames = attrnames.split()
else:
self.attrnames = attrnames or attrs
self.setproblem(target, inputs=inputs, exclude=exclude)
def setproblem(self, target, inputs=None, exclude=()):
"""Set (or change) the target and/or inputs.
This way, one DataSet can be used multiple ways. inputs, if specified,
is a list of attributes, or specify exclude as a list of attributes
to not use in inputs. Attributes can be -n .. n, or an attrname.
Also computes the list of possible values, if that wasn't done yet."""
self.target = self.attrnum(target)
exclude = map(self.attrnum, exclude)
if inputs:
self.inputs = removeall(self.target, inputs)
else:
self.inputs = [a for a in self.attrs
if a != self.target and a not in exclude]
if not self.values:
self.values = list(map(unique, zip(*self.examples)))
self.check_me()
def check_me(self):
"Check that my fields make sense."
assert len(self.attrnames) == len(self.attrs)
assert self.target in self.attrs
assert self.target not in self.inputs
assert set(self.inputs).issubset(set(self.attrs))
if self.got_values_flag:
# only check if values are provided while initializing DataSet
list(map(self.check_example, self.examples))
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example)
def check_example(self, example):
"Raise ValueError if example has any invalid values."
if self.values:
for a in self.attrs:
if example[a] not in self.values[a]:
raise ValueError('Bad value %s for attribute %s in %s' %
(example[a], self.attrnames[a], example))
def attrnum(self, attr):
"Returns the number used for attr, which can be a name, or -n .. n-1."
if isinstance(attr, str):
return self.attrnames.index(attr)
elif attr < 0:
return len(self.attrs) + attr
else:
return attr
def sanitize(self, example):
"Return a copy of example, with non-input attributes replaced by None."
return [attr_i if i in self.inputs else None
for i, attr_i in enumerate(example)]
def __repr__(self):
return '<DataSet(%s): %d examples, %d attributes>' % (
self.name, len(self.examples), len(self.attrs))
# ______________________________________________________________________________
def parse_csv(input, delim=','):
r"""Input is a string consisting of lines, each line has comma-delimited
fields. Convert this into a list of lists. Blank lines are skipped.
Fields that look like numbers are converted to numbers.
The delim defaults to ',' but '\t' and None are also reasonable values.
>>> parse_csv('1, 2, 3 \n 0, 2, na')
[[1, 2, 3], [0, 2, 'na']]
"""
lines = [line for line in input.splitlines() if line.strip()]
return [list(map(num_or_str, line.split(delim))) for line in lines]
# ______________________________________________________________________________
class CountingProbDist:
"""A probability distribution formed by observing and counting examples.
If p is an instance of this class and o is an observed value, then
there are 3 main operations:
p.add(o) increments the count for observation o by 1.
p.sample() returns a random element from the distribution.
p[o] returns the probability for o (as in a regular ProbDist)."""
def __init__(self, observations=[], default=0):
"""Create a distribution, and optionally add in some observations.
By default this is an unsmoothed distribution, but saying default=1,
for example, gives you add-one smoothing."""
self.dictionary = {}
self.n_obs = 0.0
self.default = default
self.sampler = None
for o in observations:
self.add(o)
def add(self, o):
"Add an observation o to the distribution."
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
def smooth_for(self, o):
"""Include o among the possible observations, whether or not
it's been observed yet."""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
def __getitem__(self, item):
"Return an estimate of the probability of item."
self.smooth_for(item)
return self.dictionary[item] / self.n_obs
# (top() and sample() are not used in this module, but elsewhere.)
def top(self, n):
"Return (count, obs) tuples for the n most frequent observations."
return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
def sample(self):
"Return a random sample from the distribution."
if self.sampler is None:
self.sampler = weighted_sampler(list(self.dictionary.keys()),
list(self.dictionary.values()))
return self.sampler()
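# Example (illustrative): CountingProbDist('aab')['a'] == 2/3, while an unseen
# value gets probability 0 unless default > 0 (add-k smoothing).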
# ______________________________________________________________________________
def PluralityLearner(dataset):
"""A very dumb algorithm: always pick the result that was most popular
in the training data. Makes a baseline for comparison."""
most_popular = mode([e[dataset.target] for e in dataset.examples])
def predict(example):
"Always return same result: the most popular from the training set."
return most_popular
return predict
# ______________________________________________________________________________
def NaiveBayesLearner(dataset):
"""Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too."""
targetvals = dataset.values[dataset.target]
target_dist = CountingProbDist(targetvals)
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
for gv in targetvals
for attr in dataset.inputs}
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
return (target_dist[targetval] *
product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(targetvals, key=class_probability)
return predict
# ______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
"k-NearestNeighbor: the k nearest neighbors vote."
def predict(example):
"Find the k closest, and have them vote for the best."
best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
# ______________________________________________________________________________
class DecisionFork:
"""A fork of a decision tree holds an attribute to test, and a dict
of branches, one for each of the attribute's values."""
def __init__(self, attr, attrname=None, branches=None):
"Initialize by saying what attribute this node tests."
self.attr = attr
self.attrname = attrname or attr
self.branches = branches or {}
def __call__(self, example):
"Given an example, classify it using the attribute and the branches."
attrvalue = example[self.attr]
return self.branches[attrvalue](example)
def add(self, val, subtree):
"Add a branch. If self.attr = val, go to the given subtree."
self.branches[val] = subtree
def display(self, indent=0):
name = self.attrname
print('Test', name)
for (val, subtree) in self.branches.items():
print(' ' * 4 * indent, name, '=', val, '==>', end=' ')
subtree.display(indent + 1)
def __repr__(self):
return ('DecisionFork(%r, %r, %r)'
% (self.attr, self.attrname, self.branches))
class DecisionLeaf:
"A leaf of a decision tree holds just a result."
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self, indent=0):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
# ______________________________________________________________________________
def DecisionTreeLearner(dataset):
"[Figure 18.5]"
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
elif all_same_class(examples):
return DecisionLeaf(examples[0][target])
elif len(attrs) == 0:
return plurality_value(examples)
else:
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attrnames[A])
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(
exs, removeall(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality.)"""
popular = argmax_random_tie(values[target],
key=lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
"Count the number of examples that have attr = val."
        return sum(e[attr] == val for e in examples)
def all_same_class(examples):
"Are all these examples in the same target class?"
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"Choose the attribute with the highest information gain."
return argmax_random_tie(attrs,
key=lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"Return the expected reduction in entropy from splitting by attr."
def I(examples):
return information_content([count(target, v, examples)
for v in values[target]])
N = float(len(examples))
remainder = sum((len(examples_i) / N) * I(examples_i)
for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"Return a list of (val, examples) pairs for each val of attr."
return [(v, [e for e in examples if e[attr] == v])
for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
"Number of bits to represent the probability distribution in values."
probabilities = normalize(removeall(0, values))
return sum(-p * math.log2(p) for p in probabilities)
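# Worked example (illustrative): information_content([4, 4]) == 1.0 bit, and
# information_content([3, 1]) is about 0.81 bits.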
# ______________________________________________________________________________
# A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
"""[Figure 18.11]"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Failure
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples."""
raise NotImplementedError
def passes(example, test):
"Does the example pass the test?"
raise NotImplementedError
def predict(example):
"Predict the outcome for the first passing test."
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
# ______________________________________________________________________________
def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
learning_rate=0.01, epoches=100):
"""
Layered feed-forward network.
hidden_layer_sizes: List of number of hidden units per hidden layer
    learning_rate: Learning rate of gradient descent
epoches: Number of passes over the dataset
"""
i_units = len(dataset.inputs)
o_units = 1 # As of now, dataset.target gives only one index.
# construct a network
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net,
learning_rate, epoches)
def predict(example):
# Input nodes
i_nodes = learned_net[0]
# Activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# Forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Hypothesis
o_nodes = learned_net[-1]
pred = [o_nodes[i].value for i in range(o_units)]
return 1 if pred[0] >= 0.5 else 0
return predict
class NNUnit:
"""
    A single unit of a multilayer neural network.
    inputs: incoming connections
    weights: weights on the incoming connections
"""
def __init__(self, weights=None, inputs=None):
        self.weights = weights or []
        self.inputs = inputs or []
self.value = None
self.activation = sigmoid
def network(input_units, hidden_layer_sizes, output_units):
"""
    Create a directed acyclic network with the given number of layers.
    hidden_layer_sizes: list of the number of units in each hidden layer,
    excluding the input and output layers.
"""
# Check for PerceptronLearner
if hidden_layer_sizes:
layers_sizes = [input_units] + hidden_layer_sizes + [output_units]
else:
layers_sizes = [input_units] + [output_units]
net = [[NNUnit() for n in range(size)]
for size in layers_sizes]
n_layers = len(net)
# Make Connection
for i in range(1, n_layers):
for n in net[i]:
for k in net[i-1]:
n.inputs.append(k)
n.weights.append(0)
return net
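# Example (illustrative): network(2, [3], 1) creates three layers holding 2, 3
# and 1 NNUnits, with every unit connected to all units of the previous layer.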
def BackPropagationLearner(dataset, net, learning_rate, epoches):
"[Figure 18.23] The back-propagation algorithm for multilayer network"
# Initialise weights
for layer in net:
for node in layer:
node.weights = [random.uniform(-0.5, 0.5)
for i in range(len(node.weights))]
examples = dataset.examples
'''
As of now dataset.target gives an int instead of list,
Changing dataset class will have effect on all the learners.
Will be taken care of later
'''
idx_t = [dataset.target]
idx_i = dataset.inputs
n_layers = len(net)
o_nodes = net[-1]
i_nodes = net[0]
for epoch in range(epoches):
# Iterate over each example
for e in examples:
i_val = [e[i] for i in idx_i]
t_val = [e[i] for i in idx_t]
# Activate input layer
for v, n in zip(i_val, i_nodes):
n.value = v
# Forward pass
for layer in net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Initialize delta
delta = [[] for i in range(n_layers)]
# Compute outer layer delta
o_units = len(o_nodes)
err = [t_val[i] - o_nodes[i].value
for i in range(o_units)]
delta[-1] = [(o_nodes[i].value) * (1 - o_nodes[i].value) *
(err[i]) for i in range(o_units)]
# Backward pass
h_layers = n_layers - 2
for i in range(h_layers, 0, -1):
layer = net[i]
h_units = len(layer)
nx_layer = net[i+1]
# weights from each ith layer node to each i + 1th layer node
w = [[node.weights[k] for node in nx_layer]
for k in range(h_units)]
delta[i] = [(layer[j].value) * (1 - layer[j].value) *
dotproduct(w[j], delta[i+1])
for j in range(h_units)]
# Update weights
for i in range(1, n_layers):
layer = net[i]
inc = [node.value for node in net[i-1]]
units = len(layer)
for j in range(units):
layer[j].weights = vector_add(layer[j].weights,
scalar_vector_product(
learning_rate * delta[i][j], inc))
return net
def PerceptronLearner(dataset, learning_rate=0.01, epoches=100):
"""Logistic Regression, NO hidden layer"""
i_units = len(dataset.inputs)
o_units = 1 # As of now, dataset.target gives only one index.
hidden_layer_sizes = []
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net, learning_rate, epoches)
def predict(example):
# Input nodes
i_nodes = learned_net[0]
# Activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# Forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Hypothesis
o_nodes = learned_net[-1]
pred = [o_nodes[i].value for i in range(o_units)]
return 1 if pred[0] >= 0.5 else 0
return predict
# ______________________________________________________________________________
def Linearlearner(dataset, learning_rate=0.01, epochs=100):
    """Linear regression fit by batch gradient descent.
    Define with learner = Linearlearner(data); infer with learner(x)."""
    idx_i = dataset.inputs
    idx_t = dataset.target  # As of now, dataset.target gives only one index.
    examples = dataset.examples
    # X transpose: one list per input attribute, plus a leading column of ones
    # for the bias weight.
    X_col = [[example[i] for example in examples] for i in idx_i]
    ones = [1 for _ in range(len(examples))]
    X_col = [ones] + X_col
    # Initialize random weights
    w = [random.uniform(-0.5, 0.5) for _ in range(len(idx_i) + 1)]
    for epoch in range(epochs):
        err = []
        # Pass over all examples, recording the error t - y for each
        for example in examples:
            x = [1] + [example[i] for i in idx_i]
            y = dotproduct(w, x)
            t = example[idx_t]
            err.append(t - y)
        # Batch gradient step on the squared error
        for i in range(len(w)):
            w[i] = w[i] + learning_rate * dotproduct(err, X_col[i])
    def predict(example):
        x = [1] + example
        return dotproduct(w, x)
    return predict
# ______________________________________________________________________________
def EnsembleLearner(learners):
"""Given a list of learning algorithms, have them vote."""
def train(dataset):
predictors = [learner(dataset) for learner in learners]
def predict(example):
return mode(predictor(example) for predictor in predictors)
return predict
return train
# ______________________________________________________________________________
def AdaBoost(L, K):
"""[Figure 18.34]"""
def train(dataset):
examples, target = dataset.examples, dataset.target
N = len(examples)
epsilon = 1. / (2 * N)
w = [1. / N] * N
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w)
if example[target] != h_k(example))
# Avoid divide-by-0 from either 0% or 100% error rates:
error = clip(error, epsilon, 1 - epsilon)
for j, example in enumerate(examples):
if example[target] == h_k(example):
w[j] *= error / (1. - error)
w = normalize(w)
z.append(math.log((1. - error) / error))
return WeightedMajority(h, z)
return train
def WeightedMajority(predictors, weights):
"Return a predictor that takes a weighted vote."
def predict(example):
return weighted_mode((predictor(example) for predictor in predictors),
weights)
return predict
def weighted_mode(values, weights):
"""Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1,2,3,1,2])
'b'"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(list(totals.keys()), key=totals.get)
# _____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
"""Given a learner that takes just an unweighted dataset, return
one that takes also a weight for each example. [p. 749 footnote 14]"""
def train(dataset, weights):
return unweighted_learner(replicated_dataset(dataset, weights))
return train
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
def weighted_replicate(seq, weights, n):
"""Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1,2,1], 4)
['A', 'B', 'B', 'C']"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w * n) for w in weights]
fractions = [(w * n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes)) +
weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))
def flatten(seqs): return sum(seqs, [])
# _____________________________________________________________________________
# Functions for testing learners on examples
def test(predict, dataset, examples=None, verbose=0):
"Return the proportion of the examples that are NOT correctly predicted."
if examples is None:
examples = dataset.examples
if len(examples) == 0:
return 0.0
right = 0.0
for example in examples:
desired = example[dataset.target]
output = predict(dataset.sanitize(example))
if output == desired:
right += 1
if verbose >= 2:
print(' OK: got %s for %s' % (desired, example))
elif verbose:
print('WRONG: got %s, expected %s for %s' % (
output, desired, example))
return 1 - (right / len(examples))
def train_and_test(dataset, start, end):
"""Reserve dataset.examples[start:end] for test; train on the remainder."""
start = int(start)
end = int(end)
examples = dataset.examples
train = examples[:start] + examples[end:]
val = examples[start:end]
return train, val
def cross_validation(learner, size, dataset, k=10, trials=1):
"""Do k-fold cross_validate and return their mean.
That is, keep out 1/k of the examples for testing on each of k runs.
Shuffle the examples first; If trials>1, average over several shuffles.
Returns Training error, Validataion error"""
if k is None:
k = len(dataset.examples)
if trials > 1:
trial_errT = 0
trial_errV = 0
for t in range(trials):
            errT, errV = cross_validation(learner, size, dataset,
                                          k=k, trials=1)
trial_errT += errT
trial_errV += errV
return trial_errT / trials, trial_errV / trials
else:
fold_errT = 0
fold_errV = 0
n = len(dataset.examples)
examples = dataset.examples
for fold in range(k):
random.shuffle(dataset.examples)
train_data, val_data = train_and_test(dataset, fold * (n / k),
(fold + 1) * (n / k))
dataset.examples = train_data
            h = learner(dataset, size) if size is not None else learner(dataset)
fold_errT += test(h, dataset, train_data)
fold_errV += test(h, dataset, val_data)
# Reverting back to original once test is completed
dataset.examples = examples
return fold_errT / k, fold_errV / k
def cross_validation_wrapper(learner, dataset, k=10, trials=1):
"""
Fig 18.8
    Return a learner trained with the size that yields the minimum error
    on the validation set.
    err_train: a training error array, indexed by size
    err_val: a validation error array, indexed by size
"""
err_val = []
err_train = []
size = 1
while True:
errT, errV = cross_validation(learner, size, dataset, k)
# Check for convergence provided err_val is not empty
if (err_val and isclose(err_val[-1], errV, rel_tol=1e-6)):
best_size = size
return learner(dataset, best_size)
err_val.append(errV)
err_train.append(errT)
print(err_val)
size += 1
def leave_one_out(learner, dataset, size=None):
"Leave one out cross-validation over the dataset."
return cross_validation(learner, size, dataset, k=len(dataset.examples))
def learningcurve(learner, dataset, trials=10, sizes=None):
if sizes is None:
sizes = list(range(2, len(dataset.examples) - 10, 2))
    def score(learner, size):
        random.shuffle(dataset.examples)
        train_data, val_data = train_and_test(dataset, size, len(dataset.examples))
        dataset.examples = train_data
        h = learner(dataset)
        dataset.examples = train_data + val_data
        return test(h, dataset, val_data)
return [(size, mean([score(learner, size) for t in range(trials)]))
for size in sizes]
# ______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
orings = DataSet(name='orings', target='Distressed',
attrnames="Rings Distressed Temp Pressure Flightnum")
zoo = DataSet(name='zoo', target='type', exclude=['name'],
attrnames="name hair feathers eggs milk airborne aquatic " +
"predator toothed backbone breathes venomous fins legs tail " +
"domestic catsize type")
iris = DataSet(name="iris", target="class",
attrnames="sepal-len sepal-width petal-len petal-width class")
# ______________________________________________________________________________
# The Restaurant example from [Figure 18.2]
def RestaurantDataSet(examples=None):
"Build a DataSet of Restaurant waiting examples. [Figure 18.3]"
return DataSet(name='restaurant', target='Wait', examples=examples,
attrnames='Alternate Bar Fri/Sat Hungry Patrons Price ' +
'Raining Reservation Type WaitEstimate Wait')
restaurant = RestaurantDataSet()
def T(attrname, branches):
branches = {value: (child if isinstance(child, DecisionFork)
else DecisionLeaf(child))
for value, child in branches.items()}
return DecisionFork(restaurant.attrnum(attrname), attrname, branches)
""" [Figure 18.2]
A decision tree for deciding whether to wait for a table at a restaurant.
"""
waiting_decision_tree = T('Patrons',
{'None': 'No', 'Some': 'Yes', 'Full':
T('WaitEstimate',
{'>60': 'No', '0-10': 'Yes',
'30-60':
T('Alternate', {'No':
T('Reservation', {'Yes': 'Yes', 'No':
T('Bar', {'No': 'No',
'Yes': 'Yes'
})}),
'Yes':
T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
'10-30':
T('Hungry', {'No': 'Yes', 'Yes':
T('Alternate',
{'No': 'Yes', 'Yes':
T('Raining', {'No': 'No', 'Yes': 'Yes'})
})})})})
def SyntheticRestaurant(n=20):
"Generate a DataSet with n examples."
def gen():
example = list(map(random.choice, restaurant.values))
example[restaurant.target] = waiting_decision_tree(example)
return example
return RestaurantDataSet([gen() for i in range(n)])
# ______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k / 2))
examples.append(bits)
return DataSet(name="majority", examples=examples)
def Parity(k, n, name="parity"):
"""Return a DataSet with n k-bit examples of the parity problem:
k random bits followed by a 1 if an odd number of bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(sum(bits) % 2)
examples.append(bits)
return DataSet(name=name, examples=examples)
def Xor(n):
"""Return a DataSet with n examples of 2-input xor."""
return Parity(2, n, name="xor")
def ContinuousXor(n):
"2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for i in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name="continuous xor", examples=examples)
# ______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
NearestNeighborLearner, DecisionTreeLearner],
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)],
k=10, trials=1):
"""Compare various learners on various datasets using cross-validation.
Print results as a table."""
print_table([[a.__name__.replace('Learner', '')] +
                 [cross_validation(a, None, d, k, trials)[1] for d in datasets]
for a in algorithms],
header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
| mit |
Akshay0724/scikit-learn | examples/text/document_clustering.py | 32 | 8526 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
  - TfidfVectorizer uses an in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient, which is small
for both, as this measure seems to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: since they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
# work-around for Jupyter notebook and IPython console
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
# categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
sorgerlab/belpy | indra/sources/omnipath/api.py | 5 | 2012 | import logging
import requests
from .processor import OmniPathProcessor
logger = logging.getLogger(__name__)
op_url = 'http://omnipathdb.org'
def process_from_web():
"""Query the OmniPath web API and return an OmniPathProcessor.
Returns
-------
OmniPathProcessor
An OmniPathProcessor object which contains a list of extracted
INDRA Statements in its statements attribute.
"""
ptm_json = _get_modifications()
ligrec_json = _get_interactions()
op = OmniPathProcessor(ptm_json=ptm_json, ligrec_json=ligrec_json)
op.process_ptm_mods()
op.process_ligrec_interactions()
return op
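# Example usage (illustrative only; requires network access to omnipathdb.org):
#   from indra.sources.omnipath.api import process_from_web
#   op = process_from_web()
#   stmts = op.statements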
def _get_modifications():
"""Get all PTMs from Omnipath in JSON format.
Returns
-------
JSON content for PTMs.
"""
params = {'format': 'json',
'fields': ['curation_effort', 'isoforms', 'references',
'resources', 'sources']}
ptm_url = '%s/ptms' % op_url
res = requests.get(ptm_url, params=params)
    if res.status_code != 200 or not res.text:
return None
else:
return res.json()
def _get_interactions(datasets=None):
"""Wrapper for calling the omnipath interactions API
See full list of query options here:
https://omnipathdb.org/queries/interactions
Parameters
----------
    datasets : Optional[list]
        A list of dataset names. Options are:
dorothea, kinaseextra, ligrecextra, lncrna_mrna, mirnatarget,
omnipath, pathwayextra, tf_mirna, tf_target, tfregulons
Default: 'ligrecextra'
Returns
-------
dict
json of database request
"""
interactions_url = '%s/interactions' % op_url
params = {
'fields': ['curation_effort', 'entity_type', 'references',
'resources', 'sources', 'type'],
'format': 'json',
'datasets': datasets or ['ligrecextra']
}
res = requests.get(interactions_url, params=params)
res.raise_for_status()
return res.json()
| mit |
jmargeta/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 4 | 2865 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import pylab as pl
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
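# Minimal usage sketch (illustrative): both estimators follow the standard
# scikit-learn API, e.g. LedoitWolf().fit(X).shrinkage_ or OAS().fit(X).covariance_.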
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
pl.subplot(2, 1, 1)
pl.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
pl.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
pl.ylabel("Squared error")
pl.legend(loc="upper right")
pl.title("Comparison of covariance estimators")
pl.xlim(5, 31)
# plot shrinkage coefficient
pl.subplot(2, 1, 2)
pl.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
pl.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
pl.xlabel("n_samples")
pl.ylabel("Shrinkage")
pl.legend(loc="lower right")
pl.ylim(pl.ylim()[0], 1. + (pl.ylim()[1] - pl.ylim()[0]) / 10.)
pl.xlim(5, 31)
pl.show()
| bsd-3-clause |
google/lasr | third_party/PerceptualSimilarity/util/util.py | 1 | 14037 | from __future__ import print_function
import numpy as np
from PIL import Image
import inspect
import re
import numpy as np
import os
import collections
import matplotlib.pyplot as plt
from scipy.ndimage.interpolation import zoom
#from skimage.measure import compare_ssim
import torch
#from IPython import embed
import cv2
from datetime import datetime
def datetime_str():
now = datetime.now()
return '%04d-%02d-%02d-%02d-%02d-%02d'%(now.year,now.month,now.day,now.hour,now.minute,now.second)
def read_text_file(in_path):
fid = open(in_path,'r')
vals = []
cur_line = fid.readline()
while(cur_line!=''):
vals.append(float(cur_line))
cur_line = fid.readline()
fid.close()
return np.array(vals)
def bootstrap(in_vec,num_samples=100,bootfunc=np.mean):
from astropy import stats
return stats.bootstrap(np.array(in_vec),bootnum=num_samples,bootfunc=bootfunc)
def rand_flip(input1,input2):
if(np.random.binomial(1,.5)==1):
return (input1,input2)
else:
return (input2,input1)
def l2(p0, p1, range=255.):
return .5*np.mean((p0 / range - p1 / range)**2)
def psnr(p0, p1, peak=255.):
return 10*np.log10(peak**2/np.mean((1.*p0-1.*p1)**2))
def dssim(p0, p1, range=255.):
    # imported lazily because the module-level skimage import above is commented out
    from skimage.measure import compare_ssim
    return (1 - compare_ssim(p0, p1, data_range=range, multichannel=True)) / 2.
def rgb2lab(in_img,mean_cent=False):
from skimage import color
img_lab = color.rgb2lab(in_img)
if(mean_cent):
img_lab[:,:,0] = img_lab[:,:,0]-50
return img_lab
def normalize_blob(in_feat,eps=1e-10):
norm_factor = np.sqrt(np.sum(in_feat**2,axis=1,keepdims=True))
return in_feat/(norm_factor+eps)
def cos_sim_blob(in0,in1):
in0_norm = normalize_blob(in0)
in1_norm = normalize_blob(in1)
(N,C,X,Y) = in0_norm.shape
return np.mean(np.mean(np.sum(in0_norm*in1_norm,axis=1),axis=1),axis=1)
def normalize_tensor(in_feat,eps=1e-10):
# norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1)).view(in_feat.size()[0],1,in_feat.size()[2],in_feat.size()[3]).repeat(1,in_feat.size()[1],1,1)
norm_factor = torch.sqrt(torch.sum(in_feat**2,dim=1)).view(in_feat.size()[0],1,in_feat.size()[2],in_feat.size()[3])
return in_feat/(norm_factor.expand_as(in_feat)+eps)
def cos_sim(in0,in1):
in0_norm = normalize_tensor(in0)
in1_norm = normalize_tensor(in1)
N = in0.size()[0]
X = in0.size()[2]
Y = in0.size()[3]
return torch.mean(torch.mean(torch.sum(in0_norm*in1_norm,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N)
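# cos_sim returns, for each item in the batch, the spatial average of the
# per-pixel cosine similarity between the two feature maps (channel dim 1).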
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2np(tensor_obj):
# change dimension of a tensor object into a numpy array
return tensor_obj[0].cpu().float().numpy().transpose((1,2,0))
def np2tensor(np_obj):
# change dimenion of np array into tensor array
return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
def tensor2tensorlab(image_tensor,to_norm=True,mc_only=False):
# image tensor to lab tensor
from skimage import color
img = tensor2im(image_tensor)
# print('img_rgb',img.flatten())
img_lab = color.rgb2lab(img)
# print('img_lab',img_lab.flatten())
if(mc_only):
img_lab[:,:,0] = img_lab[:,:,0]-50
if(to_norm and not mc_only):
img_lab[:,:,0] = img_lab[:,:,0]-50
img_lab = img_lab/100.
return np2tensor(img_lab)
def tensorlab2tensor(lab_tensor,return_inbnd=False):
from skimage import color
import warnings
warnings.filterwarnings("ignore")
lab = tensor2np(lab_tensor)*100.
lab[:,:,0] = lab[:,:,0]+50
# print('lab',lab)
rgb_back = 255.*np.clip(color.lab2rgb(lab.astype('float')),0,1)
# print('rgb',rgb_back)
if(return_inbnd):
# convert back to lab, see if we match
lab_back = color.rgb2lab(rgb_back.astype('uint8'))
# print('lab_back',lab_back)
# print('lab==lab_back',np.isclose(lab_back,lab,atol=1.))
# print('lab-lab_back',np.abs(lab-lab_back))
mask = 1.*np.isclose(lab_back,lab,atol=2.)
mask = np2tensor(np.prod(mask,axis=2)[:,:,np.newaxis])
return (im2tensor(rgb_back),mask)
else:
return im2tensor(rgb_back)
def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255./2.):
# def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
return image_numpy.astype(imtype)
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
# def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):
return torch.Tensor((image / factor - cent)
[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
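# With the default cent/factor, im2tensor maps an HxWx3 uint8 image in [0, 255]
# to a 1x3xHxW tensor in [-1, 1] (x / 127.5 - 1); tensor2im is the inverse mapping.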
def tensor2vec(vector_tensor):
return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def grab_patch(img_in, P, yy, xx):
return img_in[yy:yy+P,xx:xx+P,:]
def load_image(path):
if(path[-3:] == 'dng'):
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
# img = plt.imread(path)
elif(path[-3:]=='bmp' or path[-3:]=='jpg' or path[-3:]=='png'):
import cv2
return cv2.imread(path)[:,:,::-1]
else:
img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
return img
def resize_image(img, max_size=256):
[Y, X] = img.shape[:2]
# resize
max_dim = max([Y, X])
zoom_factor = 1. * max_size / max_dim
img = zoom(img, [zoom_factor, zoom_factor, 1])
return img
def resize_image_zoom(img, zoom_factor=1., order=3):
if(zoom_factor==1):
return img
else:
return zoom(img, [zoom_factor, zoom_factor, 1], order=order)
def save_image(image_numpy, image_path, ):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def prep_display_image(img, dtype='uint8'):
if(dtype == 'uint8'):
return np.clip(img, 0, 255).astype('uint8')
else:
return np.clip(img, 0, 1.)
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [
e for e in dir(object) if isinstance(
getattr(
object,
e),
collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print(
'mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' %
(np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def rgb2lab(input):
from skimage import color
return color.rgb2lab(input / 255.)
def montage(
imgs,
PAD=5,
RATIO=16 / 9.,
EXTRA_PAD=(
False,
False),
MM=-1,
NN=-1,
primeDir=0,
verbose=False,
returnGridPos=False,
backClr=np.array(
(0,
0,
0))):
# INPUTS
# imgs YxXxMxN or YxXxN
# PAD scalar number of pixels in between
# RATIO scalar target ratio of cols/rows
# MM scalar # rows, if specified, overrides RATIO
# NN scalar # columns, if specified, overrides RATIO
# primeDir scalar 0 for top-to-bottom, 1 for left-to-right
# OUTPUTS
# mont_imgs MM*Y x NN*X x M big image with everything montaged
# def montage(imgs, PAD=5, RATIO=16/9., MM=-1, NN=-1, primeDir=0,
# verbose=False, forceFloat=False):
if(imgs.ndim == 3):
toExp = True
imgs = imgs[:, :, np.newaxis, :]
else:
toExp = False
Y = imgs.shape[0]
X = imgs.shape[1]
M = imgs.shape[2]
N = imgs.shape[3]
PADS = np.array((PAD))
if(PADS.flatten().size == 1):
PADY = PADS
PADX = PADS
else:
PADY = PADS[0]
PADX = PADS[1]
if(MM == -1 and NN == -1):
NN = np.ceil(np.sqrt(1.0 * N * RATIO))
MM = np.ceil(1.0 * N / NN)
NN = np.ceil(1.0 * N / MM)
elif(MM == -1):
MM = np.ceil(1.0 * N / NN)
elif(NN == -1):
NN = np.ceil(1.0 * N / MM)
if(primeDir == 0): # write top-to-bottom
[grid_mm, grid_nn] = np.meshgrid(
np.arange(MM, dtype='uint'), np.arange(NN, dtype='uint'))
elif(primeDir == 1): # write left-to-right
[grid_nn, grid_mm] = np.meshgrid(
np.arange(NN, dtype='uint'), np.arange(MM, dtype='uint'))
grid_mm = np.uint(grid_mm.flatten()[0:N])
grid_nn = np.uint(grid_nn.flatten()[0:N])
EXTRA_PADY = EXTRA_PAD[0] * PADY
    EXTRA_PADX = EXTRA_PAD[1] * PADX
# mont_imgs = np.zeros(((Y+PAD)*MM-PAD, (X+PAD)*NN-PAD, M), dtype=use_dtype)
mont_imgs = np.zeros(
(np.uint(
(Y + PADY) * MM - PADY + EXTRA_PADY),
np.uint(
(X + PADX) * NN - PADX + EXTRA_PADX),
M),
dtype=imgs.dtype)
mont_imgs = mont_imgs + \
backClr.flatten()[np.newaxis, np.newaxis, :].astype(mont_imgs.dtype)
for ii in np.random.permutation(N):
# print imgs[:,:,:,ii].shape
# mont_imgs[grid_mm[ii]*(Y+PAD):(grid_mm[ii]*(Y+PAD)+Y), grid_nn[ii]*(X+PAD):(grid_nn[ii]*(X+PAD)+X),:]
mont_imgs[np.uint(grid_mm[ii] *
(Y +
PADY)):np.uint((grid_mm[ii] *
(Y +
PADY) +
Y)), np.uint(grid_nn[ii] *
(X +
PADX)):np.uint((grid_nn[ii] *
(X +
PADX) +
X)), :] = imgs[:, :, :, ii]
if(M == 1):
imgs = imgs.reshape(imgs.shape[0], imgs.shape[1], imgs.shape[3])
if(toExp):
mont_imgs = mont_imgs[:, :, 0]
if(returnGridPos):
# return (mont_imgs,np.concatenate((grid_mm[:,:,np.newaxis]*(Y+PAD),
# grid_nn[:,:,np.newaxis]*(X+PAD)),axis=2))
return (mont_imgs, np.concatenate(
(grid_mm[:, np.newaxis] * (Y + PADY), grid_nn[:, np.newaxis] * (X + PADX)), axis=1))
# return (mont_imgs, (grid_mm,grid_nn))
else:
return mont_imgs
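# Example (illustrative): for imgs of shape (64, 64, 3, 12) with the defaults
# (PAD=5, RATIO=16/9), the 12 images are tiled on a 3x4 grid and the returned
# montage has shape (202, 271, 3).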
class zeroClipper(object):
def __init__(self, frequency=1):
self.frequency = frequency
def __call__(self, module):
embed()
if hasattr(module, 'weight'):
# module.weight.data = torch.max(module.weight.data, 0)
module.weight.data = torch.max(module.weight.data, 0) + 100
def flatten_nested_list(nested_list):
# only works for list of list
accum = []
for sublist in nested_list:
for item in sublist:
accum.append(item)
return accum
def read_file(in_path,list_lines=False):
agg_str = ''
f = open(in_path,'r')
cur_line = f.readline()
while(cur_line!=''):
agg_str+=cur_line
cur_line = f.readline()
f.close()
if(list_lines==False):
return agg_str.replace('\n','')
else:
line_list = agg_str.split('\n')
ret_list = []
for item in line_list:
if(item!=''):
ret_list.append(item)
return ret_list
def read_csv_file_as_text(in_path):
agg_str = []
f = open(in_path,'r')
cur_line = f.readline()
while(cur_line!=''):
agg_str.append(cur_line)
cur_line = f.readline()
f.close()
return agg_str
def random_swap(obj0,obj1):
if(np.random.rand() < .5):
return (obj0,obj1,0)
else:
return (obj1,obj0,1)
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
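# Worked example (illustrative): voc_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]))
# == 0.75 with the default metric: 0.5 * 1.0 + 0.5 * 0.5.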
| apache-2.0 |
woobe/h2o | py/testdir_single_jvm/test_rf_covtype20x_fvec.py | 2 | 6035 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_rf, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_gbm
DO_SMALL = True
drf2ParamDict = {
'response': [None, 'C55'],
'max_depth': [None, 10,20,100],
'nbins': [None,5,10,100,1000],
'ignored_cols_by_name': [None,'C1','C2','C3','C4','C5','C6','C7','C8','C9'],
'sample_rate': [None,0.20,0.40,0.60,0.80,0.90],
'seed': [None,'0','1','11111','19823134','1231231'],
'mtries': [None,1,3,5,7,9,11,13,17,19,23,37,51],
'balance_classes': [0],
'importance': [0],
}
print "Will RF train on one dataset, test on another (multiple params)"
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1, java_heap_GB=14)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_covtype20x_fvec(self):
h2o.beta_features = True
importFolderPath = 'standard'
if DO_SMALL:
csvFilenameTrain = 'covtype.data'
hex_key = 'covtype1x.data.A.hex'
else:
csvFilenameTrain = 'covtype20x.data'
hex_key = 'covtype20x.data.A.hex'
csvPathname = importFolderPath + "/" + csvFilenameTrain
parseResultTrain = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, timeoutSecs=500)
inspect = h2o_cmd.runInspect(key=parseResultTrain['destination_key'])
dataKeyTrain = parseResultTrain['destination_key']
print "Parse end", dataKeyTrain
# have to re import since source key is gone
# we could just copy the key, but sometimes we change the test/train data to covtype.data
if DO_SMALL:
csvFilenameTest = 'covtype.data'
hex_key = 'covtype1x.data.B.hex'
dataKeyTest2 = 'covtype1x.data.C.hex'
else:
csvFilenameTest = 'covtype20x.data'
hex_key = 'covtype20x.data.B.hex'
dataKeyTest2 = 'covtype20x.data.C.hex'
csvPathname = importFolderPath + "/" + csvFilenameTest
parseResultTest = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, timeoutSecs=500)
print "Parse result['destination_key']:", parseResultTest['destination_key']
inspect = h2o_cmd.runInspect(key=parseResultTest['destination_key'])
dataKeyTest = parseResultTest['destination_key']
print "Parse end", dataKeyTest
# make a 3rd key so the predict is uncached too!
execExpr = dataKeyTest2 + "=" + dataKeyTest
kwargs = {'str': execExpr, 'timeoutSecs': 15}
resultExec = h2o_cmd.runExec(**kwargs)
# train
# this does RFView to understand when RF completes, so the time reported for RFView here, should be
# considered the "first RFView" times..subsequent have some caching?.
# unless the no_confusion_matrix works
# params is mutable. This is default.
paramDict = drf2ParamDict
params = {
'ntrees': 20,
'destination_key': 'RF_model'
}
colX = h2o_rf.pickRandRfParams(paramDict, params)
kwargs = params.copy()
timeoutSecs = 30 + kwargs['ntrees'] * 60
start = time.time()
rf = h2o_cmd.runRF(parseResult=parseResultTrain,
timeoutSecs=timeoutSecs, retryDelaySecs=1, **kwargs)
print "rf job end on ", dataKeyTrain, 'took', time.time() - start, 'seconds'
print "\nRFView start after job completion"
model_key = kwargs['destination_key']
ntree = kwargs['ntrees']
start = time.time()
# this does the RFModel view for v2. but only model_key is used. Data doesn't matter? (nor ntree)
h2o_cmd.runRFView(None, dataKeyTrain, model_key, ntree=ntree, timeoutSecs=timeoutSecs)
print "First rfview end on ", dataKeyTrain, 'took', time.time() - start, 'seconds'
for trial in range(1):
# scoring
start = time.time()
rfView = h2o_cmd.runRFView(None, dataKeyTest,
model_key, ntree=ntree, timeoutSecs=timeoutSecs, out_of_bag_error_estimate=0, retryDelaySecs=1)
print "rfview", trial, "end on ", dataKeyTest, 'took', time.time() - start, 'seconds.'
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=ntree)
self.assertAlmostEqual(classification_error, 50, delta=50,
msg="Classification error %s differs too much" % classification_error)
start = time.time()
predict = h2o.nodes[0].generate_predictions(model_key=model_key, data_key=dataKeyTest2)
print "predict", trial, "end on ", dataKeyTest, 'took', time.time() - start, 'seconds.'
parseKey = parseResultTrain['destination_key']
rfModelKey = rfView['drf_model']['_key']
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key=parseKey,
model_key=rfModelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual=parseKey,
vactual='C55',
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
pctWrong = h2o_gbm.pp_cm_summary(cm);
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
print "Trial #", trial, "completed"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Fireblend/scikit-learn | examples/linear_model/plot_sgd_iris.py | 284 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
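        # solve coef[c, 0] * x0 + coef[c, 1] * x1 + intercept[c] = 0 for x1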
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 62 | 3231 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
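        # divide each row by its class support so each true-class row sums to 1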
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
Fireblend/scikit-learn | examples/exercises/plot_iris_exercise.py | 320 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)  # 90% train / 10% test split
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 113 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
ronaldahmed/labor-market-demand-analysis | shallow parsing models/req_model/req_sp.py | 1 | 5176 | """
Structured Perceptron model for REQ entity extraction
"""
import os,sys
path_utils = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path_utils)
from utils_new import *
import sequences.structured_perceptron as spc
import id_feature_bigram_req as idf
import ext2 as exfc2
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics import confusion_matrix
if __name__ == '__main__':
print("Reading data...")
mode = 'by_sent'
#mode = 'by_doc'
train,test,val = getData(test=0.1, val=0.1, tags = ['REQ'], mode=mode,filter_empty=True)
#ipdb.set_trace()
print("Building features...")
feature_mapper = idf.IDFeatures(train,mode=mode)
#feature_mapper = exfc2.ExtendedFeatures(train,mode=mode)
feature_mapper.build_features()
#ipdb.set_trace()
print("Init Struc Perceptron")
epochs = 5
learning_rate = 1
reg = 0
sp = spc.StructuredPerceptron( train.x_dict, train.y_dict, feature_mapper,
num_epochs=epochs, learning_rate=learning_rate,
reg_param=reg)
print("Training...")
sp.train_supervised_bigram(train)
###################################################################################################################
model = 'sp_%i_%s' % (epochs,mode)
print("Saving model with name:",model)
saveObject(sp,model)
###################################################################################################################
print("Predicting...")
print("::Training...")
pred_train = sp.viterbi_decode_corpus_bigram(train)
print("::Validation...")
pred_val = sp.viterbi_decode_corpus_bigram(val)
print("::Testing...")
pred_test = sp.viterbi_decode_corpus_bigram(test)
print("Evaluating...")
eval_train = sp.evaluate_corpus(train, pred_train)
eval_val = sp.evaluate_corpus(val, pred_val, DEBUG=False)
eval_test = sp.evaluate_corpus(test, pred_test)
print("Structured Perceptron - Features Accuracy Train: %.4f | Val: %.4f | Test: %.4f" % (eval_train, eval_val, eval_test) )
print()
print("Metrics: Training data")
cs_train = MyChunkScore(train)
cs_train.evaluate(train,pred_train)
print(cs_train)
print()
print("Metrics: Validation data")
cs_val = MyChunkScore(val)
cs_val.evaluate(val,pred_val)
print(cs_val)
print()
print("Metrics: Testing data")
cs_test = MyChunkScore(test)
cs_test.evaluate(test,pred_test)
print(cs_test)
###################################################################################################################
print("==========================================================")
print("Confussion Matrix: sapeee!") # DICT HAS TO BE FROM THE SAME PART (TRAIN, VAL OR TEST)
conf_matrix = cm.build_confusion_matrix(val.seq_list, pred_val, sp.get_num_states())
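    # conf_matrix is a nested dict {true label id: {predicted label id: count}}, as consumed below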
for id,_dict in conf_matrix.items():
name = val.y_dict.get_label_name(id)
print("::",name)
for k,v in _dict.items():
name_in = val.y_dict.get_label_name(k)
print(" %s: %i" % (name_in,v))
#cm.plot_confusion_bar_graph(conf_matrix, val.y_dict, range(sp.get_num_states()), 'Confusion matrix')
###################################################################################################################
temp = [(v,k) for k,v in train.y_dict.items() if k in ['B','I','O']]
temp.sort()
names_train = [k for v,k in temp]
temp = [(v,k) for k,v in val.y_dict.items() if k in ['B','I','O']]
temp.sort()
names_val = [k for v,k in temp]
temp = [(v,k) for k,v in test.y_dict.items() if k in ['B','I','O']]
temp.sort()
names_test = [k for v,k in temp]
Y_train = join_data_tags_bio(train.seq_list)
Y_val = join_data_tags_bio(val.seq_list)
Y_test = join_data_tags_bio(test.seq_list)
Y_train_pred = join_data_tags_bio(pred_train)
Y_val_pred = join_data_tags_bio(pred_val)
Y_test_pred = join_data_tags_bio(pred_test)
print("Metrics: Training data")
print(classification_report(Y_train, Y_train_pred, target_names=names_train))
print("Accuracy: ",accuracy_score(Y_train,Y_train_pred))
print("Metrics: Validation data")
print(classification_report(Y_val , Y_val_pred , target_names=names_val))
print("Accuracy: ",accuracy_score(Y_val,Y_val_pred))
print("Metrics: Testing data")
print(classification_report(Y_test , Y_test_pred , target_names=names_test))
print("Accuracy: ",accuracy_score(Y_test,Y_test_pred))
"""
yg = [val.y_dict.get_label_name(id) for id in Y_val]
yp = [val.y_dict.get_label_name(id) for id in Y_val_pred]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + names_val)
ax.set_yticklabels([''] + names_val)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
"""
print("Debugging!!!")
ipdb.set_trace()
print("Doggy style!!!") | mit |