repo_name stringlengths 6–103 | path stringlengths 4–209 | copies stringclasses 325 values | size stringlengths 4–7 | content stringlengths 838–1.04M | license stringclasses 15 values |
---|---|---|---|---|---|
jmargeta/scikit-learn | sklearn/semi_supervised/label_propagation.py | 2 | 14014 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. Running them can be
expensive, however, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
if sparse.isspmatrix(X):
self.X_ = X
else:
self.X_ = np.asarray(X)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
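# --- Illustrative sketch (not part of scikit-learn) -------------------------
# The fit() loop above repeatedly applies Y <- clamp_weights * (G . Y) + y_static:
# one propagation step over the graph followed by (hard or soft) clamping of the
# originally labeled points. The standalone NumPy restatement below mirrors that
# update on dense arrays (convergence test omitted); `graph`, `labels` and
# `alpha` are hypothetical inputs supplied by the caller, not objects defined
# in this module.
def _propagation_sketch(graph, labels, alpha=1.0, max_iter=30):
    """Plain NumPy restatement of the clamped propagation update."""
    unlabeled = labels == -1
    classes = np.unique(labels[~unlabeled])
    dist = np.zeros((labels.shape[0], classes.shape[0]))
    for k, c in enumerate(classes):
        dist[labels == c, k] = 1.0
    y_static = (1 - alpha) * dist if alpha > 0. else dist.copy()
    y_static[unlabeled] = 0.0
    clamp = np.ones((labels.shape[0], 1))
    clamp[unlabeled, 0] = alpha
    for _ in range(max_iter):
        dist = clamp * graph.dot(dist) + y_static
    return dist / dist.sum(axis=1, keepdims=True)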
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternative label propagation strategy, more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schölkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
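# --- Illustrative sketch (not part of scikit-learn) -------------------------
# For a dense affinity matrix W, the matrix returned above corresponds (up to
# the handling of isolated nodes) to the spreading operator
# S = D^-1/2 W D^-1/2 of Zhou et al. (2004) with its diagonal zeroed out.
# A direct NumPy construction, for comparison only:
def _spreading_matrix_sketch(W):
    d_inv_sqrt = 1.0 / np.sqrt(W.sum(axis=1))
    S = (W * d_inv_sqrt[:, np.newaxis]) * d_inv_sqrt[np.newaxis, :]
    np.fill_diagonal(S, 0.0)
    return S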
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/metrics/__init__.py | 28 | 3604 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics,
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
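# --- Illustrative usage sketch (standalone, not part of this module) ---------
# How a few of the re-exported functions are typically used:
#
#     from sklearn.metrics import accuracy_score, confusion_matrix, get_scorer
#     y_true = [0, 1, 1, 0]
#     y_pred = [0, 1, 0, 0]
#     accuracy_score(y_true, y_pred)       # 0.75
#     confusion_matrix(y_true, y_pred)     # [[2, 0], [1, 1]]
#     get_scorer('accuracy')               # scorer object usable in model selection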
| bsd-3-clause |
ivankreso/stereo-vision | scripts/egomotion_evaluation/bumblebee/plot_path.py | 1 | 7322 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import math
filepath1 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/bb2_tracker_freak_7_1/bb.txt"
filepath2 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/bb2_tracker_freak_7_1_ba/bb.txt"
#filepath1 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/bb2_tracker_freak_7_2/bb.txt"
#filepath2 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/bb2_tracker_freak_7_2_ba/bb.txt"
gt_pts = np.array(np.loadtxt(filepath1))
#gt_pts = np.array(np.loadtxt('/home/kivan/Projects/datasets/KITTI/poses/07.txt'))
#gt_pts = np.array(np.loadtxt('/home/kivan/Projects/datasets/KITTI/poses/00.txt'))
#gt_pts = np.array(np.loadtxt('gt_track_tsukuba_crop.txt'))
#gt_pts = np.array(np.loadtxt('vo_bb_libvisotracker.txt'))
#gt_pts = np.array(np.loadtxt('vo_bb_bfm.txt'))
#gt_pts = np.array(np.loadtxt('vo_07_libvisotracker.txt'))
#gt_pts = np.array(np.loadtxt('vo_07_bfm.txt'))
#gt_pts = np.array(np.loadtxt('vo_tsukuba_bfm.txt'))
#gt_pts = np.array(np.loadtxt('vo_tsukuba_libviso_refiner.txt'))
vo_pts = np.array(np.loadtxt(filepath2))
#vo_pts = np.array(np.loadtxt('/home/kivan/Projects/cv-stereo/build/vo_sba/release/vo.txt'))
#vo_pts = np.array(np.loadtxt('/home/kivan/Projects/cv-stereo/build/vo_sba/release/sba_vo.txt'))
#vo_pts = np.array(np.loadtxt('vo_00_bfm.txt'))
#vo_pts = np.array(np.loadtxt('vo_00_libviso.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_libviso.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_libviso_subpixel.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_bfm_refiner.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_libviso.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_libviso_refiner.txt'))
#vo_pts = np.array(np.loadtxt('vo_tsukuba_libvisotracker_refinernew.txt'))
#vo_pts = np.array(np.loadtxt('vo_bb_libvisotracker_refiner.txt'))
#vo_pts = np.array(np.loadtxt('vo_bb_bfm_refiner.txt'))
#vo_pts = np.array(np.loadtxt('vo_07_libvisotracker_refiner.txt'))
#vo_pts = np.array(np.loadtxt('vo_07_bfm_refiner.txt'))
if gt_pts.shape[0] != vo_pts.shape[0]:
print("GT and VO data not the same size\n")
exit(-1)
gt_pts3D = np.zeros((gt_pts.shape[0], 3))
vo_pts3D = np.zeros((vo_pts.shape[0], 3))
for i in range(len(vo_pts)):
vo_pts3D[i,0] = vo_pts[i,3]
vo_pts3D[i,1] = vo_pts[i,7]
vo_pts3D[i,2] = vo_pts[i,11]
gt_pts3D[i,0] = gt_pts[i,3]
gt_pts3D[i,1] = gt_pts[i,7]
gt_pts3D[i,2] = gt_pts[i,11]
fig_path = plt.figure(figsize=(14,8))
plt.axes().set_aspect('equal')
plt.plot(gt_pts3D[:,0], gt_pts3D[:,2], color='r')
plt.plot(gt_pts3D[::100,0], gt_pts3D[::100,2], marker='.', color='k', ls="")
plt.plot(gt_pts3D[0,0], gt_pts3D[0,2], marker='o', color='r', ls="")
plt.plot(vo_pts3D[:,0], vo_pts3D[:,2], color='b')
plt.plot(vo_pts3D[::100,0], vo_pts3D[::100,2], marker='.', color='k', ls='')
plt.plot(vo_pts3D[0,0], vo_pts3D[0,2], marker='o', color='b', ls="")
#for i in range(0,len(vo_pts3D),10):
# #plt.text(vo_pts3D[i,0]+2, vo_pts3D[i,2]+2, str(i), color='b')
# plt.text(gt_pts3D[i,0]+2, gt_pts3D[i,2]+2, str(i), color='r')
plt.xlabel("x (m)", fontsize=26)
plt.ylabel("z (m)", fontsize=26)
plt.legend(loc="upper left", fontsize=22)
plt.title(filepath1+"\n"+filepath2, fontsize=12)
plt.show()
exit()
#for i in range(0, vo_pts3D.shape[0], 5):
# #plt.text(vo_pts3D[i,3], vo_pts3D[i,11], str(vo_pts3D[i,7]), color='b')
# plt.text(vo_pts3D[i,3], vo_pts3D[i,11], '{0:.{1}f}'.format(vo_pts3D[i,7], 1) + " (" + str(i) + ")", color='b')
## plt.text(gt_pts3D[i,0]+2, gt_pts3D[i,1]+2, str(i), color='r')
# angle between 2 vectors defined by 3 points using dot product
def calcphi(pt1,pt2,pt3):
v1=pt2-pt1
v2=pt3-pt2
return math.degrees(math.acos(np.dot(v1,v2)/np.linalg.norm(v1)/np.linalg.norm(v2)))
# angle between 2 vectors using vector product (-90, 90)
def calcphi2vec(v1,v2):
return math.degrees(math.asin(np.linalg.norm(np.cross(v1, v2)) / np.linalg.norm(v1)/np.linalg.norm(v2)))
def calcphi2(pt1,pt2,pt3):
v1=pt2-pt1
v2=pt3-pt2
return calcphi2vec(v1,v2)
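# Quick sanity checks of the angle helpers (illustrative values, not taken from
# the trajectory data): perpendicular unit vectors give 90 degrees, collinear
# points give 0 degrees.
assert abs(calcphi2vec(np.array([1., 0., 0.]), np.array([0., 1., 0.])) - 90.0) < 1e-9
assert abs(calcphi(np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([2., 0., 0.]))) < 1e-9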
# angular movement data
gt_phis=np.array([calcphi2(gt_pts3D[i-1],gt_pts3D[i],gt_pts3D[i+1]) for i in range(1,len(gt_pts3D)-1)])
vo_phis=np.array([calcphi2(vo_pts3D[i-1],vo_pts3D[i],vo_pts3D[i+1]) for i in range(1,len(vo_pts3D)-1)])
# angular movement difference between GPS and visual odometry
# can't compute this until the VO and GPS paths are aligned (common starting point and rotation offset)
#gps_vo_phis=[calcphi2vec(gt_pts3D[i]-gt_pts3D[i-1], vo_pts3D[i]-vo_pts3D[i-1]) for i in range(1,len(vo_pts3D))]
# speed movement data
gps_speed=np.array([np.linalg.norm(gt_pts3D[i]-gt_pts3D[i-1]) for i in range(1,len(vo_pts3D))])
vo_speed=np.array([np.linalg.norm(vo_pts3D[i]-vo_pts3D[i-1]) for i in range(1,len(vo_pts3D))])
#print (gt_phis[0:10])
#print (vo_phis[0:10])
#print([gt_pts3D[i] for i in range(0,10)])
#print([vo_pts3D[i] for i in range(0,10)])
#print([vo_pts3D[i]-vo_pts3D[i-1] for i in range(1,10)])
#print(calcphi(vo_pts3D[2-2],vo_pts3D[2-1],vo_pts3D[2]))
#plt.plot(gt_pts3D[:10,0], gt_pts3D[:10,1], marker='o', color='r')
#plt.plot(vo_pts3D[:10,0], vo_pts3D[:10,1], marker='o', color='b')
trans_mse = np.mean(np.square(gps_speed - vo_speed))
trans_mae = np.mean(np.abs(gps_speed - vo_speed))
print("translation error MSE: ", trans_mse)
print("translation error MAE: ", trans_mae)
fig_speed = plt.figure(figsize=(12,8))
plt.plot(range(1,len(vo_pts3D)), gps_speed, marker='o', color='r', label="GPS")
plt.plot(range(1,len(vo_pts3D)), vo_speed, marker='o', color='b', label="visual odometry")
plt.title("MSE = " + str(trans_mse)[:5] + ", MAE = " + str(trans_mae)[:5], fontsize=30)
#plt.title('Speed', fontsize=14)
plt.xlabel('time (s)', fontsize=30)
plt.ylabel('distance (m)', fontsize=30)
plt.legend(fontsize=24)
# plot scale error of visual odometry
fig_scale = plt.figure(figsize=(12,8))
scale_err = np.array(gps_speed) / np.array(vo_speed)
plt.plot(scale_err, marker='o', color='r')
plt.plot([0,120], [1.0,1.0], ls="--", color="k")
#fig_scale.suptitle('Scale error', fontsize=18)
plt.xlabel('time (s)', fontsize=30)
plt.ylabel('scale error (gps / odometry)', fontsize=30)
#print(gt_phis)
#print(vo_phis)
#print(np.square(gt_phis - vo_phis))
#print((gt_phis - vo_phis))
#print(np.square(gt_phis - vo_phis))
rot_mse = np.mean(np.square(gt_phis - vo_phis))
rot_mae = np.mean(np.abs(gt_phis - vo_phis))
print("rotation error MSE: ", rot_mse)
print("rotation error MAE: ", rot_mae)
fig_rot = plt.figure(figsize=(12,8))
plt.plot(range(1,len(vo_pts3D)-1), gt_phis, marker='o', color='r', label="GPS rotation angles")
plt.plot(range(1,len(vo_pts3D)-1), vo_phis, marker='o', color='b', label="odometry rotation angles")
#plt.plot(range(1,len(vo_pts3D)-1), gps_vo_phis[:-1], marker='o', color='b', label="TODO")
plt.xlabel('time (s)', fontsize=26)
plt.ylabel('angle (deg)', fontsize=26)
#plt.text(45, 20, "average error = " + str(rot_avgerr)[:5], color='b', fontsize=16)
plt.title("MSE = " + str(rot_mse)[:5] + ", MAE = " + str(rot_mae)[:5], fontsize=26)
plt.legend(fontsize=22)
fig_path.savefig("plot_path_diff.pdf", bbox_inches='tight')
fig_speed.savefig("plot_speed.pdf", bbox_inches='tight')
fig_scale.savefig('plot_scale_error.pdf', bbox_inches='tight')
fig_rot.savefig("plot_rotation_diff.pdf", bbox_inches='tight')
plt.show()
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/datasets/olivetti_faces.py | 197 | 4688 | """Modified Olivetti faces dataset.
The original database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
There are ten different images of each of 40 distinct subjects. For some
subjects, the images were taken at different times, varying the lighting,
facial expressions (open / closed eyes, smiling / not smiling) and facial
details (glasses / no glasses). All the images were taken against a dark
homogeneous background with the subjects in an upright, frontal position (with
tolerance for some side movement).
The original dataset consisted of 92 x 112 images, while the Roweis version
consists of 64 x 64 images.
"""
# Copyright (c) 2011 David Warde-Farley <wardefar at iro dot umontreal dot ca>
# License: BSD 3 clause
from io import BytesIO
from os.path import join, exists
from os import makedirs
try:
# Python 2
import urllib2
urlopen = urllib2.urlopen
except ImportError:
# Python 3
import urllib.request
urlopen = urllib.request.urlopen
import numpy as np
from scipy.io.matlab import loadmat
from .base import get_data_home, Bunch
from ..utils import check_random_state
from ..externals import joblib
DATA_URL = "http://cs.nyu.edu/~roweis/data/olivettifaces.mat"
TARGET_FILENAME = "olivetti.pkz"
# Grab the module-level docstring to use as a description of the
# dataset
MODULE_DOCS = __doc__
def fetch_olivetti_faces(data_home=None, shuffle=False, random_state=0,
download_if_missing=True):
"""Loader for the Olivetti faces data-set from AT&T.
Read more in the :ref:`User Guide <olivetti_faces>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
shuffle : boolean, optional
If True the order of the dataset is shuffled to avoid having
images of the same person grouped.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : optional, integer or RandomState object
The seed or the random number generator used to shuffle the
data.
Returns
-------
An object with the following attributes:
data : numpy array of shape (400, 4096)
Each row corresponds to a ravelled face image of original size 64 x 64 pixels.
images : numpy array of shape (400, 64, 64)
Each row is a face image corresponding to one of the 40 subjects of the dataset.
target : numpy array of shape (400, )
Labels associated with each face image. These labels range from
0 to 39 and correspond to the Subject IDs.
DESCR : string
Description of the modified Olivetti Faces Dataset.
Notes
------
This dataset consists of 10 pictures each of 40 individuals. The original
database was available from (now defunct)
http://www.uk.research.att.com/facedatabase.html
The version retrieved here comes in MATLAB format from the personal
web page of Sam Roweis:
http://www.cs.nyu.edu/~roweis/
"""
data_home = get_data_home(data_home=data_home)
if not exists(data_home):
makedirs(data_home)
if not exists(join(data_home, TARGET_FILENAME)):
print('downloading Olivetti faces from %s to %s'
% (DATA_URL, data_home))
fhandle = urlopen(DATA_URL)
buf = BytesIO(fhandle.read())
mfile = loadmat(buf)
faces = mfile['faces'].T.copy()
joblib.dump(faces, join(data_home, TARGET_FILENAME), compress=6)
del mfile
else:
faces = joblib.load(join(data_home, TARGET_FILENAME))
# We want floating point data, but float32 is enough (there is only
# one byte of precision in the original uint8s anyway)
faces = np.float32(faces)
faces = faces - faces.min()
faces /= faces.max()
faces = faces.reshape((400, 64, 64)).transpose(0, 2, 1)
# 10 images per class, 400 images total, each class is contiguous.
target = np.array([i // 10 for i in range(400)])
if shuffle:
random_state = check_random_state(random_state)
order = random_state.permutation(len(faces))
faces = faces[order]
target = target[order]
return Bunch(data=faces.reshape(len(faces), -1),
images=faces,
target=target,
DESCR=MODULE_DOCS)
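# --- Illustrative usage sketch (not part of scikit-learn) --------------------
# The first call downloads the ~4 MB MATLAB file and caches it under the
# scikit-learn data home; later calls load it from disk.
#
#     from sklearn.datasets import fetch_olivetti_faces
#     faces = fetch_olivetti_faces(shuffle=True, random_state=0)
#     faces.data.shape      # (400, 4096)
#     faces.images.shape    # (400, 64, 64)
#     faces.target.shape    # (400,)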
| bsd-3-clause |
ericvandenbergfb/spark | python/pyspark/ml/base.py | 22 | 5840 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
import copy
from pyspark import since
from pyspark.ml.param import Params
from pyspark.ml.param.shared import *
from pyspark.ml.common import inherit_doc
from pyspark.sql.functions import udf
from pyspark.sql.types import StructField, StructType, DoubleType
@inherit_doc
class Estimator(Params):
"""
Abstract class for estimators that fit models to data.
.. versionadded:: 1.3.0
"""
__metaclass__ = ABCMeta
@abstractmethod
def _fit(self, dataset):
"""
Fits a model to the input dataset. This is called by the default implementation of fit.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:returns: fitted model
"""
raise NotImplementedError()
@since("1.3.0")
def fit(self, dataset, params=None):
"""
Fits a model to the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded params. If a list/tuple of
param maps is given, this calls fit on each param map and returns a list of
models.
:returns: fitted model(s)
"""
if params is None:
params = dict()
if isinstance(params, (list, tuple)):
return [self.fit(dataset, paramMap) for paramMap in params]
elif isinstance(params, dict):
if params:
return self.copy(params)._fit(dataset)
else:
return self._fit(dataset)
else:
raise ValueError("Params must be either a param map or a list/tuple of param maps, "
"but got %s." % type(params))
@inherit_doc
class Transformer(Params):
"""
Abstract class for transformers that transform one dataset into another.
.. versionadded:: 1.3.0
"""
__metaclass__ = ABCMeta
@abstractmethod
def _transform(self, dataset):
"""
Transforms the input dataset.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:returns: transformed dataset
"""
raise NotImplementedError()
@since("1.3.0")
def transform(self, dataset, params=None):
"""
Transforms the input dataset with optional parameters.
:param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
:param params: an optional param map that overrides embedded params.
:returns: transformed dataset
"""
if params is None:
params = dict()
if isinstance(params, dict):
if params:
return self.copy(params)._transform(dataset)
else:
return self._transform(dataset)
else:
raise ValueError("Params must be a param map but got %s." % type(params))
@inherit_doc
class Model(Transformer):
"""
Abstract class for models that are fitted by estimators.
.. versionadded:: 1.4.0
"""
__metaclass__ = ABCMeta
@inherit_doc
class UnaryTransformer(HasInputCol, HasOutputCol, Transformer):
"""
Abstract class for transformers that take one input column, apply transformation,
and output the result as a new column.
.. versionadded:: 2.3.0
"""
@abstractmethod
def createTransformFunc(self):
"""
Creates the transform function using the given param map. The input param map already takes
account of the embedded param map. So the param values should be determined
solely by the input param map.
"""
raise NotImplementedError()
@abstractmethod
def outputDataType(self):
"""
Returns the data type of the output column.
"""
raise NotImplementedError()
@abstractmethod
def validateInputType(self, inputType):
"""
Validates the input type. Throw an exception if it is invalid.
"""
raise NotImplementedError()
def transformSchema(self, schema):
inputType = schema[self.getInputCol()].dataType
self.validateInputType(inputType)
if self.getOutputCol() in schema.names:
raise ValueError("Output column %s already exists." % self.getOutputCol())
outputFields = copy.copy(schema.fields)
outputFields.append(StructField(self.getOutputCol(),
self.outputDataType(),
nullable=False))
return StructType(outputFields)
def _transform(self, dataset):
self.transformSchema(dataset.schema)
transformUDF = udf(self.createTransformFunc(), self.outputDataType())
transformedDataset = dataset.withColumn(self.getOutputCol(),
transformUDF(dataset[self.getInputCol()]))
return transformedDataset
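# --- Illustrative sketch (not part of pyspark) --------------------------------
# A minimal concrete UnaryTransformer that doubles a DoubleType input column.
# The class name and behaviour are made up for illustration; using it still
# requires an active SparkSession plus setInputCol()/setOutputCol() calls.
class _DoublingTransformer(UnaryTransformer):

    def createTransformFunc(self):
        return lambda value: 2.0 * value

    def outputDataType(self):
        return DoubleType()

    def validateInputType(self, inputType):
        if inputType != DoubleType():
            raise TypeError("Expected DoubleType but got %s" % inputType)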
| apache-2.0 |
RecipeML/Recipe | recipe/evaluate_algorithm.py | 1 | 3207 | # -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
import warnings
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import load_pipeline as load
from sklearn.pipeline import make_pipeline
# from sklearn import cross_validation
from sklearn.model_selection import train_test_split
from sklearn import metrics
def evaluate_algorithm(mlAlgorithm, dataTraining, seed, dataSeed, internalCV,metric):
"""Evaluate a single algorithm
Parameters
----------
mlAlgorithm: string
The pipeline that will be evaluated.
dataTraining:
The data used to train the chosen method.
seed: int
The seed used to control the GP behaviour.
dataSeed:int
The seed to control the data resample each x generations.
internalCV: int
The number of folds in the internal cross-validation procedure.
"""
try:
#Load the dataset:
df = pd.read_csv(dataTraining, header=0, delimiter=",")
class_name = df.columns.values.tolist()[-1]
#Apply a filter if the data has categorical data (sklearn does not accept this type of data):
objectList = list(df.select_dtypes(include=['object']).columns)
if (class_name in objectList and len(objectList)>=1):
df = df.apply(LabelEncoder().fit_transform)
#Set the training data and target (classes):
training_data = df.ix[:,:-1].values
training_target = df[class_name].values
pipe = load.load_pipeline(mlAlgorithm)
#Verify if the pipeline is valid. Otherwise, return -0.5 as evaluation
try:
pipeline=make_pipeline(*pipe)
except Exception as exc:
warnings.warn("ERROR PIPELINE CREATION: " + mlAlgorithm,UserWarning)
return -0.5
# # To shuffle data in the cv according to the dataSeed:
# n_samples = training_data.shape[0]
# cv = cross_validation.ShuffleSplit(n_samples, n_iter=1, train_size=0.67,
# test_size=0.33, random_state=dataSeed)
#
# #Fit the final model generated by the pipeline:
# scores = cross_validation.cross_val_score(pipeline, training_data, training_target,
# cv=cv, scoring=metric)
#
# result = scores.mean()
X_train, X_test, y_train, y_test = train_test_split(training_data, training_target, test_size=0.33, random_state=dataSeed,stratify=training_target)
pipeline.fit(X_train,y_train)
predictedTest = pipeline.predict(X_test)
expectedTest = y_test
f1Test = metrics.f1_score(expectedTest, predictedTest, average='weighted')
result = f1Test
return result
except (KeyboardInterrupt, SystemExit):
return 0.0
except Exception as e:
warnings.warn("ERROR PIPELINE: "+ str(e) + " -> " + mlAlgorithm,UserWarning)
return 0.0
| gpl-3.0 |
mkraemer67/plugml | plugml/feature.py | 1 | 3221 | import numpy as np
from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import RegexpTokenizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import Imputer, StandardScaler
class RawFeature(object):
def __init__(self, data, col):
self.name = col
self._scaler = None
self._imputer = None
self._vec = None
self.data = self._preprocess(data[col])
self._setup()
self.data = self.transform(self.data, False)
def _preprocess(self, data):
return np.matrix(data, dtype=np.double).T
def _setup(self):
self.featNames = [self.name + "##raw"]
self.dim = 1
def transform(self, data, preprocess=True):
if data[0] != None:
feats = self._preprocess(data) if preprocess else data
if self._vec:
feats = self._vec.transform(feats)
else:
feats = [np.nan for i in range(self.dim)]
if not self._imputer:
self._imputer = Imputer()
self._imputer.fit(feats)
feats = self._imputer.transform(feats)
if not self._scaler:
# Sparse matrices cannot be normalized regarding mean.
# This detection should not be done with try/except though.
try:
self._scaler = StandardScaler()
self._scaler.fit(feats)
except:
self._scaler = StandardScaler(with_mean=False)
self._scaler.fit(feats)
feats = self._scaler.transform(feats)
return feats
def __getitem__(self, key):
return self.data[key]
class CategoricalFeature(RawFeature):
def _preprocess(self, data):
prefix = self.name + "##cat##"
return [{prefix+cat.lower():1 for cat in arr} for arr in data]
def _setup(self):
self._vec = DictVectorizer()
self._vec.fit(self.data)
self.featNames = self._vec.get_feature_names()
self.dim = len(self.featNames)
class TextFeature(RawFeature):
def __init__(self, data, col):
self._tokenizer = RegexpTokenizer(r"\w\w+")
self._stemmer = LancasterStemmer()
self._stopwords = stopwords.words("english")
self._prefix = col + "##txt##"
super(TextFeature, self).__init__(data, col)
def _preprocess(self, data):
return [self._tokenizeText(text) for text in data]
def _setup(self):
self._vec = TfidfVectorizer(
analyzer=lambda x: x,
sublinear_tf=True,
smooth_idf=True,
norm='l2',
min_df=2,
max_df=0.9
)
self._vec.fit(self.data)
self.featNames = self._vec.get_feature_names()
self.dim = len(self.featNames)
def _tokenizeText(self, text):
tokens = [t.lower() for t in self._tokenizer.tokenize(text.decode('utf-8'))]
tokens = [self._prefix + self._stemmer.stem(t) for t in tokens
if t not in self._stopwords]
return tokens
| apache-2.0 |
glewis17/fuel | fuel/datasets/base.py | 5 | 12883 | import collections
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from picklable_itertools import iter_, izip
from fuel.schemes import SequentialExampleScheme
from fuel.streams import DataStream
from fuel.utils import Subset
@add_metaclass(ABCMeta)
class Dataset(object):
"""A dataset.
Dataset classes implement the interface to a particular dataset. The
interface consists of a number of routines to manipulate so called
"state" objects, e.g. open, reset and close them.
Parameters
----------
sources : tuple of strings, optional
The data sources to load and return by :meth:`get_data`. By default
all data sources are returned.
axis_labels : dict, optional
Maps source names to tuples of strings describing axis semantics,
one per axis. Defaults to `None`, i.e. no information is available.
Attributes
----------
sources : tuple of strings
The sources this dataset will provide when queried for data e.g.
``('features',)`` when querying only the data from MNIST.
provides_sources : tuple of strings
The sources this dataset *is able to* provide e.g. ``('features',
'targets')`` for MNIST (regardless of which data the data stream
actually requests). Any implementation of a dataset should set this
attribute on the class (or at least before calling ``super``).
example_iteration_scheme : :class:`.IterationScheme` or ``None``
The iteration scheme the class uses in order to produce a stream of
examples.
Notes
-----
Datasets should only implement the interface; they are not expected to
perform the iteration over the actual data. As such, they are
stateless, and can be shared by different parts of the library
simultaneously.
"""
provides_sources = None
default_transformers = tuple()
def __init__(self, sources=None, axis_labels=None):
if not self.provides_sources:
raise ValueError("dataset does not have `provides_sources`")
if sources is not None:
if not sources or not all(source in self.provides_sources
for source in sources):
raise ValueError("unable to provide requested sources")
self.sources = sources
self.axis_labels = axis_labels
@property
def sources(self):
if not hasattr(self, '_sources'):
return self.provides_sources
return self._sources
@sources.setter
def sources(self, sources):
self._sources = sources
def apply_default_transformers(self, stream):
"""Applies default transformers to a stream.
Parameters
----------
stream : :class:`~.streams.AbstractDataStream`
A data stream.
"""
for (cls, args, kwargs) in self.default_transformers:
args = [stream] + args
stream = cls(*args, **kwargs)
return stream
@property
def example_iteration_scheme(self):
if not hasattr(self, '_example_iteration_scheme'):
raise AttributeError("dataset does not provide an example "
"iteration scheme")
return self._example_iteration_scheme
@example_iteration_scheme.setter
def example_iteration_scheme(self, value):
self._example_iteration_scheme = value
def get_example_stream(self):
return DataStream(self, iteration_scheme=self.example_iteration_scheme)
def open(self):
"""Return the state if the dataset requires one.
Datasets which e.g. read files from disks require open file
handlers, and this sort of stateful information should be handled
by the data stream.
Returns
-------
state : object
An object representing the state of a dataset.
"""
pass
def reset(self, state):
"""Resets the state.
Parameters
----------
state : object
The current state.
Returns
-------
state : object
A reset state.
Notes
-----
The default implementation closes the state and opens a new one. A
more efficient implementation (e.g. using ``file.seek(0)`` instead
of closing and re-opening the file) can override the default one in
derived classes.
"""
self.close(state)
return self.open()
def next_epoch(self, state):
"""Switches the dataset state to the next epoch.
The default implementation for this method is to reset the state.
Parameters
----------
state : object
The current state.
Returns
-------
state : object
The state for the next epoch.
"""
return self.reset(state)
def close(self, state):
"""Cleanly close the dataset e.g. close file handles.
Parameters
----------
state : object
The current state.
"""
pass
@abstractmethod
def get_data(self, state=None, request=None):
"""Request data from the dataset.
.. todo::
A way for the dataset to communicate which kind of requests it
accepts, and a way to communicate what kind of request is being
sent when supporting multiple.
Parameters
----------
state : object, optional
The state as returned by the :meth:`open` method. The dataset
can use this to e.g. interact with files when needed.
request : object, optional
If supported, the request for a particular part of the data
e.g. the number of examples to return, or the indices of a
particular minibatch of examples.
Returns
-------
tuple
A tuple of data matching the order of :attr:`sources`.
"""
def filter_sources(self, data):
"""Filter the requested sources from those provided by the dataset.
A dataset can be asked to provide only a subset of the sources it
can provide (e.g. asking MNIST only for the features, not for the
labels). A dataset can choose to use this information to e.g. only
load the requested sources into memory. However, in case the
performance gain of doing so would be negligible, the dataset can
load all the data sources and then use this method to return only
those requested.
Parameters
----------
data : tuple of objects
The data from all the sources i.e. should be of the same length
as :attr:`provides_sources`.
Returns
-------
tuple
A tuple of data matching :attr:`sources`.
Examples
--------
>>> import numpy
>>> class Random(Dataset):
... provides_sources = ('features', 'targets')
... def get_data(self, state=None, request=None):
... data = (numpy.random.rand(10), numpy.random.randn(3))
... return self.filter_sources(data)
>>> Random(sources=('targets',)).get_data() # doctest: +SKIP
(array([-1.82436737, 0.08265948, 0.63206168]),)
"""
return tuple([d for d, s in zip(data, self.provides_sources)
if s in self.sources])
class IterableDataset(Dataset):
"""Creates a dataset from a set of iterables.
Parameters
----------
iterables : :class:`~collections.OrderedDict` or iterable
The iterable(s) to provide interface to. The iterables' `__iter__`
method should return a new iterator over the iterable. If an
:class:`~collections.OrderedDict` is given, its values should be
iterables providing data, and its keys strings that are used as
source names. If a single iterable is given, it will be given the
source ``data``.
Attributes
----------
iterables : list
A list of :class:`~collections.Iterable` objects.
Notes
-----
Internally, this class uses picklable_itertools' ``iter_``
function, providing picklable alternatives to some iterators such as
:func:`range`, :func:`tuple`, and even :class:`file`. However, if the
iterable returns a different kind of iterator that is not picklable,
you might want to consider using the :func:`.do_not_pickle_attributes`
decorator.
To iterate over a container in batches, combine this dataset with the
:class:`Batch` data stream.
"""
example_iteration_scheme = None
def __init__(self, iterables, **kwargs):
if isinstance(iterables, dict):
self.provides_sources = tuple(iterables.keys())
else:
self.provides_sources = ('data',)
super(IterableDataset, self).__init__(**kwargs)
if isinstance(iterables, dict):
if not all(isinstance(iterable, collections.Iterable)
for iterable in iterables.values()):
raise ValueError
self.iterables = [iterables[source] for source in self.sources]
else:
if not isinstance(iterables, collections.Iterable):
raise ValueError
self.iterables = [iterables]
try:
if len(set(len(iterable) for iterable in self.iterables)) != 1:
raise ValueError("iterables are of different length")
except TypeError:
pass
@property
def num_examples(self):
try:
num_examples, = set(len(iterable) for iterable in self.iterables)
return num_examples
except TypeError:
return float('nan')
def open(self):
iterators = [iter_(channel) for channel in self.iterables]
return izip(*iterators)
def get_data(self, state=None, request=None):
if state is None or request is not None:
raise ValueError
return next(state)
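# --- Illustrative usage sketch (not part of fuel) -----------------------------
# Wrapping two equal-length containers and pulling examples through the
# stateless open/get_data/close interface (the values here are made up):
#
#     from collections import OrderedDict
#     dataset = IterableDataset(OrderedDict([('features', [[0, 1], [2, 3]]),
#                                            ('targets', [0, 1])]))
#     state = dataset.open()
#     dataset.get_data(state)   # ([0, 1], 0)
#     dataset.close(state)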
class IndexableDataset(Dataset):
"""Creates a dataset from a set of indexable containers.
Parameters
----------
indexables : :class:`~collections.OrderedDict` or indexable
The indexable(s) to provide interface to. This means it must
support the syntax ```indexable[0]``. If an
:class:`~collections.OrderedDict` is given, its values should be
indexables providing data, and its keys strings that are used as
source names. If a single indexable is given, it will be given the
source ``data``.
Attributes
----------
indexables : list
A list of indexable objects.
Notes
-----
If the indexable data is very large, you might want to consider using
the :func:`.do_not_pickle_attributes` decorator to make sure the data
doesn't get pickled with the dataset, but gets reloaded/recreated
instead.
This dataset also uses the source names to create properties that
provide easy access to the data.
"""
def __init__(self, indexables, start=None, stop=None, **kwargs):
if isinstance(indexables, dict):
self.provides_sources = tuple(indexables.keys())
else:
self.provides_sources = ('data',)
super(IndexableDataset, self).__init__(**kwargs)
if isinstance(indexables, dict):
self.indexables = [indexables[source][start:stop]
for source in self.sources]
if not all(len(indexable) == len(self.indexables[0])
for indexable in self.indexables):
raise ValueError("sources have different lengths")
else:
self.indexables = [indexables]
self.example_iteration_scheme = SequentialExampleScheme(
self.num_examples)
self.start = start
self.stop = stop
self.subset = Subset(slice(start, stop), self.num_examples)
def __getattr__(self, attr):
if (attr not in ['sources', 'indexables', '_sources'] and
attr in self.sources):
return self.indexables[self.sources.index(attr)]
raise AttributeError
# Without explicitly defining a trivial __setstate__ method,
# the __getattribute__ method would call the __getattr__ method,
# which would raise an AttributeError. This causes problems
# when unpickling.
def __setstate__(self, dict):
self.__dict__ = dict
@property
def num_examples(self):
return len(self.indexables[0])
def get_data(self, state=None, request=None):
if state is not None or request is None:
raise ValueError
return tuple(self.subset.index_within_subset(indexable, request)
for indexable in self.indexables)
| mit |
mgraffg/simplegp | SimpleGP/tests/test_bayes.py | 1 | 13624 | # Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP import Classification
from SimpleGP import SparseArray
from nose.tools import assert_almost_equals
from test_classification import *
def test_sparse_array_class_mean():
np.set_printoptions(precision=3)
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
a = y.mean_per_cl(Xs, y.class_freq(np.unique(cl).shape[0]))
for i, v in zip(np.unique(cl), np.array(a).T):
print X[cl == i].mean(axis=0), v
assert np.all(X[cl == i].mean(axis=0) == np.array(v))
def test_sparse_array_class_var():
np.set_printoptions(precision=3)
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
kfreq = y.class_freq(np.unique(cl).shape[0])
a = y.mean_per_cl(Xs, kfreq)
var = y.var_per_cl(Xs, a, kfreq)
for i, v in zip(np.unique(cl), np.array(var).T):
print np.var(X[cl == i], axis=0), v
map(lambda (x, y):
assert_almost_equals(x, y),
zip(np.var(X[cl == i], axis=0), np.array(v)))
def test_sparse_class_freq():
y = SparseArray.fromlist(cl)
f = y.class_freq(np.unique(cl).shape[0])
for i in np.unique(cl):
assert (cl == i).sum() == f[i]
def test_sparse_joint_log_likelihood():
np.set_printoptions(precision=3)
import array
import math
from sklearn.naive_bayes import GaussianNB
m = GaussianNB().fit(X, cl)
llh = m._joint_log_likelihood(X)
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
kfreq = y.class_freq(np.unique(cl).shape[0])
mu = y.mean_per_cl(Xs, kfreq)
var = y.var_per_cl(Xs, mu, kfreq)
tot = sum(kfreq)
cl_prior = array.array('d', map(lambda x: math.log(x / tot), kfreq))
llh2 = SparseArray.joint_log_likelihood(Xs, mu, var, cl_prior)
map(lambda (x, y):
map(lambda (x1, y1):
assert_almost_equals(x1, y1),
zip(x, y)),
zip(llh, llh2))
def test_bayes_train():
from SimpleGP import Bayes
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
bayes = Bayes().train(Xs, y)
assert bayes._class_freq
b2 = Bayes(class_freq=bayes._class_freq,
ncl=3).train(Xs, y)
map(lambda (x, y): assert_almost_equals(x, y),
zip(bayes._class_freq, b2._class_freq))
def test_bayes_llh():
from SimpleGP import Bayes
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
bayes = Bayes().train(Xs, y)
bayes.create_population()
a = bayes.joint_log_likelihood(1)
assert np.all(a)
a = bayes.joint_log_likelihood(0)
print bayes._elm_constants[0]
b = bayes.joint_log_likelihood(0)
map(lambda (x, y):
map(lambda (x1, y1): assert_almost_equals(x1, y1),
zip(x, y)),
zip(a, b))
def test_bayes_eval_ind():
from SimpleGP import Bayes
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
bayes = Bayes().train(Xs, y)
bayes.create_population()
a = bayes.eval(0)
assert a.isfinite()
bayes._elm_constants[0][-1] < bayes._ntrees
assert bayes.eval(1).isfinite()
def test_bayes_predict_proba():
from SimpleGP import Bayes
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
bayes = Bayes().train(Xs, y)
bayes.create_population()
bayes.fitness(0)
pr = bayes.predict(bayes._x, ind=0).tonparray()
a = bayes.predict_proba(bayes._x, ind=0)
assert np.all(pr == a.argmax(axis=1))
def test_bayes_no_use_st():
from SimpleGP import Bayes
try:
Bayes(use_st=1)
assert False
except NotImplementedError:
pass
def test_bayes_BER():
from SimpleGP import Bayes
Xs = map(lambda x: SparseArray.fromlist(x), X.T)
y = SparseArray.fromlist(cl)
bayes = Bayes().train(Xs, y)
bayes.create_population()
yh = bayes.eval(1)
b = bayes.distance(bayes._f, yh)
b2 = Classification.BER(bayes._f.tonparray(),
yh.tonparray())
print b, b2
assert b == b2
def test_bayes_predict():
from SimpleGP import Bayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = Bayes().train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
print bayes.fitness(1)
pr = bayes.predict(Xvs)
b2 = Bayes().train(Xvs, yvs)
b2.create_population()
b2.set_early_stopping_ind(bayes._early_stopping)
map(lambda (x, y):
map(lambda (x1, y1): assert_almost_equals(x1, y1), zip(x, y)),
zip(bayes._elm_constants[1][0], b2._elm_constants[0][0]))
pr2 = b2.predict(Xvs, ind=0)
assert pr.SSE(pr2) == 0
map(lambda (x, y):
map(lambda (x1, y1): assert_almost_equals(x1, y1), zip(x, y)),
zip(bayes._elm_constants[1][0], b2._elm_constants[0][0]))
b2.predict_proba(Xvs, ind=0)
map(lambda (x, y):
map(lambda (x1, y1): assert_almost_equals(x1, y1), zip(x, y)),
zip(bayes._elm_constants[1][0], b2._elm_constants[0][0]))
assert b2._fitness[0] == -np.inf
def test_adaBayes_beta():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=3).train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
bayes.fitness(0)
assert bayes._beta_constants[0] is not None
assert bayes._beta_constants[0] < 1
def test_adaBayes_predict():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=3).train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
bayes.fitness(0)
score = bayes._score_yh.copy()
assert bayes._beta_constants[0] is not None
pr = bayes.predict(bayes._test_set, ind=0).tonparray()
assert np.all(bayes._score_yh == score)
assert np.all(bayes._score_yh.argmax(axis=1) == pr)
def test_adaBayes_distribution():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=3).train(Xtr, ytr)
assert bayes._x[0].size() == Xtr[0].size() * bayes._frac_ts
assert_almost_equals(bayes._prob.sum(), 1)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
bayes.fitness(0)
print bayes._prob
assert (bayes._prob < 0.5).sum() == bayes._prob.shape[0]
assert_almost_equals(bayes._prob.sum(), 1)
def test_adaBayes_inds():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=3).train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
fit = bayes.fitness(0)
beta = bayes._beta_constants[0]
assert len(bayes._inds) == 1
fit2 = bayes.fitness(0)
assert fit == fit2
beta2 = bayes._beta_constants[0]
assert beta == beta2
def test_adaBayes_save_restore_early_stopping():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=3).train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
bayes.fitness(0)
bayes.save_ind(0)
bayes._beta_constants[0] = None
bayes.restore_ind(0)
assert bayes._beta_constants[0] is not None
assert np.all(bayes.early_stopping[-1] == bayes._beta_constants[0])
def test_adaBayes_multiple():
from SimpleGP import AdaBayes
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
bayes = AdaBayes(ntimes=1, generations=3, popsize=10,
ntrees=2).train(Xtr, ytr)
bayes.set_test(Xvs, y=yvs)
bayes.create_population()
bayes.fitness(7)
print map(lambda x: x[0], bayes._inds), bayes._et
bayes.fitness(1)
print map(lambda x: x[0], bayes._inds), bayes._et
assert len(bayes._inds) == 2
def test_adaBayes():
from SimpleGP import AdaBayes
class A:
def __init__(self):
self._times = 0
def callback(self, a):
assert a._p is None
self._times += 1
np.random.seed(0)
index = np.arange(X.shape[0])
np.random.shuffle(index)
Xtr = map(SparseArray.fromlist, X[index[:120]].T)
ytr = SparseArray.fromlist(cl[index[:120]])
Xvs = map(SparseArray.fromlist, X[index[120:]].T)
yvs = SparseArray.fromlist(cl[index[120:]])
a = A()
bayes = AdaBayes(ntimes=5,
generations=3, popsize=3).fit(Xtr, ytr,
test=Xvs,
test_y=yvs,
callback=a.callback)
assert len(bayes._inds) >= a._times
print a._times
# def test_ibayes_predict():
# from SimpleGP import IBayes
# Xs = map(lambda x: SparseArray.fromlist(x), X.T)
# y = SparseArray.fromlist(cl)
# bayes = IBayes().train(Xs, y)
# bayes.create_population()
# print bayes.fitness(0)
# print bayes.fitness(1)
# a = bayes.predict_proba(bayes._x, ind=0)
# b = bayes.predict_proba(bayes._x, ind=1)
# r = np.concatenate((a, b), axis=1)
# bayes._inds.append([None,
# bayes.population[1].copy(),
# bayes._p_constants[1].copy(),
# bayes._elm_constants[1],
# bayes._class_freq])
# r2 = bayes.predict_proba(bayes._x, ind=0)
# assert np.fabs(r - r2).mean() == 0
# bayes._inds = []
# b1 = bayes.predict_proba(bayes._x, ind=1)
# assert np.fabs(b - b1).mean() == 0
# def test_ibayes():
# from SimpleGP import IBayes
# Xs = map(lambda x: SparseArray.fromlist(x), X.T)
# y = SparseArray.fromlist(cl)
# bayes = IBayes().train(Xs, y)
# bayes.create_population()
# bayes.fitness(0)
# bayes.fitness(2)
# a = bayes.predict_llh(bayes._x, ind=0)
# b = bayes.predict_llh(bayes._x, ind=2)
# c1 = np.concatenate((a, b), axis=1)
# bayes.prev_llh(a)
# c2 = bayes.eval(2)
# assert np.all(c1.argmax(axis=1) % bayes._ncl == c2)
# bayes._inds.append([None,
# bayes.population[0].copy(),
# bayes._p_constants[0].copy(),
# bayes._elm_constants[0],
# bayes._class_freq])
# c3 = bayes.predict_llh(bayes._x, 2).argmax(axis=1) % bayes._ncl
# assert np.all(c2 == c3)
# def test_ibayes_fit():
# from SimpleGP import IBayes
# def callback(self):
# if hasattr(self, 'calls'):
# self.calls += 1
# else:
# self.calls = 1
# print "*"*10
# np.random.seed(0)
# index = np.arange(X.shape[0])
# np.random.shuffle(index)
# Xtr = map(SparseArray.fromlist, X[index[:120]].T)
# ytr = SparseArray.fromlist(cl[index[:120]])
# Xvs = map(SparseArray.fromlist, X[index[120:]].T)
# yvs = SparseArray.fromlist(cl[index[120:]])
# bayes = IBayes(generations=2, popsize=10,
# ntimes=2, verbose=True).fit(Xtr, ytr,
# test=Xvs, test_y=yvs,
# callback=callback)
# print map(lambda x: x[0], bayes._inds)
# print len(bayes._inds), bayes.calls
# assert len(bayes._inds) == bayes.calls
| apache-2.0 |
shihuai/TCAI-2017 | models_config/segmentation_unet2d.py | 1 | 5043 | # -*- coding:UTF-8 -*-
# !/usr/bin/env python
#########################################################################
# File Name: unet2d.py
# Author: Banggui
# mail: [email protected]
# Created Time: 2017-04-23 Sunday 15:13:24
#########################################################################
import numpy as np
import cv2
import csv
from glob import glob
import pandas as pd
import os
import sys
import random
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from sklearn.metrics import jaccard_similarity_score
#from metric import dice_coef, dice_coef_loss
#from shapely.geometry import MultiPolygon, Polygon
#import shapely.wkt
#import shapely.affinity
from collections import defaultdict
#sys.path.append("./utils")
#from unet2d_data_provider import getDataProvider
from metric import dice_coef_loss, dice_coef, dice_coef_np
class UNet2D(object):
    # Initialize the network parameters
def __init__(self, row = 256, col = 256, color_type = 1):
        self.color_type = color_type
self.row = row
self.col = col
self.model = self.unet2d_generator()
    # Define the UNet2D network model
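    # Encoder: four blocks of two 3x3 ReLU convolutions (32 -> 256 filters), each
    # followed by 2x2 max-pooling; a 512-filter bottleneck; then a mirrored decoder
    # that upsamples and concatenates the matching encoder feature maps (skip
    # connections), ending in a 1x1 sigmoid convolution that outputs the mask.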
def unet2d_generator(self):
inputs = Input((self.color_type, self.row, self.col))
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)
up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)
up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)
up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
model = Model(input=inputs, output=conv10)
#model.compile(optimizer=Adam(), loss='binary_crossentropy', metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
model.compile(optimizer = Adam(lr = 1e-5), loss = dice_coef_loss, metrics = [dice_coef])
#model.compile(optimizer = 'sgd', loss = 'binary_crossentropy', metrics = ['accuracy'])
return model
    # Train the UNet2D network
def train_unet2d(self, trainX, trainY, valX, valY, batch_size = 8, epoch = 10):
print "trainX shape: ", trainX.shape
print "trainY shape: ", trainY.shape
print "valX shape: ", valX.shape
print "valY shape: ", valY.shape
for i in range(1):
            self.model.fit(trainX, trainY, batch_size = batch_size, epochs = epoch, verbose = 1,
shuffle = True, validation_data = (valX, valY))
self.model.save_weights('./output/models/unet2d_model_' + str(self.row) + 'x' + str(self.col))
    # Load the saved network weights
def load_mode(self):
self.model.load_weights('./output/models/unet2d_model_' + str(self.row) + 'x' + str(self.col))
    # Predict on the test data
def predict_unet2d(self, testX):
mask_test = self.model.predict(testX, batch_size = 8, verbose = 1)
return mask_test
def getUNet2DModel(row, col, color_type):
return UNet2D(row, col, color_type)
| mit |
Fireblend/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 5 | 23953 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although this second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a value error is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
XA = np.arange(45)
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.resize(np.arange(45), (5, 9))
XB = np.arange(32)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
Fireblend/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 248 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
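# The modified Huber loss below is piecewise in z = y_true * y_pred:
# -4 * z for z < -1, (1 - z)**2 for -1 <= z < 1, and 0 for z >= 1.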
def modified_huber_loss(y_true, y_pred):
z = y_pred * y_true
loss = -4 * z
loss[z >= -1] = (1 - z[z >= -1]) ** 2
loss[z >= 1.] = 0
return loss
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_text/test_freqdist.py | 1 | 2245 | # tests.test_text.test_freqdist
# Tests for the frequency distribution visualization
#
# Author: Rebecca Bilbro
# Created: 2017-03-22 15:27
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_freqdist.py [bd9cbb9] [email protected] $
"""
Tests for the frequency distribution text visualization
"""
##########################################################################
## Imports
##########################################################################
import pytest
from yellowbrick.datasets import load_hobbies
from yellowbrick.text.freqdist import *
from tests.base import IS_WINDOWS_OR_CONDA, VisualTestCase
from sklearn.feature_extraction.text import CountVectorizer
##########################################################################
## Data
##########################################################################
corpus = load_hobbies()
##########################################################################
## FreqDist Tests
##########################################################################
class TestFreqDist(VisualTestCase):
@pytest.mark.xfail(
IS_WINDOWS_OR_CONDA,
reason="font rendering different in OS and/or Python; see #892",
)
def test_integrated_freqdist(self):
"""
Assert no errors occur during freqdist integration
"""
vectorizer = CountVectorizer()
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
visualizer = FreqDistVisualizer(features)
visualizer.fit(docs)
visualizer.finalize()
self.assert_images_similar(visualizer, tol=0.5) # w/o tol fails with RMS 0.121
def test_freqdist_quickmethod(self):
"""
Assert no errors occur during freqdist quickmethod
"""
vectorizer = CountVectorizer()
docs = vectorizer.fit_transform(corpus.data)
features = vectorizer.get_feature_names()
viz = freqdist(features, docs, show=False)
assert isinstance(viz, FreqDistVisualizer)
# yellowbrick.exceptions.ImageComparisonFailure: images not close (RMS 1.401)
self.assert_images_similar(viz, tol=1.5)
| apache-2.0 |
kracwarlock/neon | neon/datasets/tests/test_cifar100.py | 7 | 1060 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from nose.plugins.attrib import attr
from neon.datasets.cifar100 import CIFAR100
from neon.backends.cpu import CPU
class TestCIFAR100(object):
tmp_repo = os.path.join(os.path.dirname(__file__), 'repo')
def setup(self):
os.makedirs(self.tmp_repo)
def teardown(self):
shutil.rmtree(self.tmp_repo, ignore_errors=True)
@attr('slow')
def test_fine_labels(self):
data = CIFAR100(coarse=False, repo_path=self.tmp_repo)
data.backend = CPU(rng_seed=0)
data.backend.actual_batch_size = 128
data.load()
assert len(data.inputs['train']) == 50000
assert len(data.targets['train'][0]) == 100
@attr('slow')
def test_coarse_labels(self):
data = CIFAR100(coarse=True, repo_path=self.tmp_repo)
data.backend = CPU(rng_seed=0)
data.backend.actual_batch_size = 128
data.load()
assert len(data.inputs['train']) == 50000
assert len(data.targets['train'][0]) == 20
| apache-2.0 |
Akshay0724/scikit-learn | examples/svm/plot_svm_margin.py | 12 | 2492 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors (margin away from hyperplane in direction
# perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in
# 2-d.
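    # Derivation: the geometric margin is 1 / ||w||; shifting a line of slope a
    # by that perpendicular distance changes its intercept by margin * sqrt(1 + a^2).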
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy - np.sqrt(1 + a ** 2) * margin
yy_up = yy + np.sqrt(1 + a ** 2) * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
KevinHCChen/wireless-aoa | scripts/_base.py | 2 | 6163 | """Utilities for the neural network modules
"""
# Author: Issam H. Laradji <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.fixes import expit as logistic_sigmoid
def identity(X):
"""Simply return the input array.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Same as the input data.
"""
return X
def logistic(X):
"""Compute the logistic function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return logistic_sigmoid(X, out=X)
def tanh(X):
"""Compute the hyperbolic tan function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return np.tanh(X, out=X)
def relu(X):
"""Compute the rectified linear unit function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
np.clip(X, 0, np.finfo(X.dtype).max, out=X)
return X
def softmax(X):
"""Compute the K-way softmax function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
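# Worked example (illustrative): softmax operates row-wise and each output row
# sums to 1, e.g. softmax(np.array([[1., 2., 3.]])) ~= [[0.090, 0.245, 0.665]].
# Note that it writes the result into its (float) input array in place.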
ACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic,
'relu': relu, 'softmax': softmax}
def inplace_logistic_derivative(Z):
"""Compute the derivative of the logistic function given output value
from logistic function
It exploits the fact that the derivative is a simple function of the output
value from logistic function
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data which is output from the logistic function
Returns
-------
Z_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return Z * (1 - Z)
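# Identity used above: d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)), so the
# derivative is computed directly from the sigmoid output Z.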
def inplace_tanh_derivative(Z):
"""Compute the derivative of the hyperbolic tan function given output value
from hyperbolic tan
It exploits the fact that the derivative is a simple function of the output
value from hyperbolic tan
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data which is output from the hyperbolic tan function
Returns
-------
Z_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return 1 - (Z ** 2)
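# Identity used above: d/dx tanh(x) = 1 - tanh(x)**2, so the derivative is
# computed directly from the tanh output Z.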
def inplace_relu_derivative(Z):
"""Compute the derivative of the rectified linear unit function given output
value from relu
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data which is output from a rectified linear unit
Returns
-------
Z_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return (Z > 0).astype(Z.dtype)
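# The relu derivative is 1 where the output is positive and 0 elsewhere; the
# non-differentiable point Z == 0 is assigned a derivative of 0 by convention.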
DERIVATIVES = {'tanh': inplace_tanh_derivative,
'logistic': inplace_logistic_derivative,
'relu': inplace_relu_derivative}
def squared_loss(y_true, y_pred):
"""Compute the squared loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) values.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return ((y_true - y_pred) ** 2).mean() / 2
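# The factor of 1/2 cancels the 2 from differentiating the square, so the
# gradient of this loss with respect to y_pred is proportional to
# (y_pred - y_true).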
def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
"""Compute binary logistic loss for classification.
This is identical to log_loss in binary classification case,
but is kept for its use in multilabel case.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
return -np.sum(y_true * np.log(y_prob) +
(1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0]
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
| mit |
DistrictDataLabs/yellowbrick | yellowbrick/model_selection/dropping_curve.py | 1 | 15251 | # yellowbrick.model_selection.dropping_curve
# Implements a feature dropping curve visualization for model selection.
#
# Author: Charles Guan
# Created: Wed Dec 8 15:03:00 2021 -0800
"""
Implements a random-input-dropout curve visualization for model selection.
Another common name: neuron dropping curve (NDC), in neural decoding research
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import ModelVisualizer
from yellowbrick.style import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.model_selection import validation_curve as sk_validation_curve
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
# Default ticks for the model selection curve, relative number of features
DEFAULT_FEATURE_SIZES = np.linspace(0.1, 1.0, 5)
##########################################################################
# DroppingCurve visualizer
##########################################################################
class DroppingCurve(ModelVisualizer):
"""
Selects random subsets of features and estimates the training and
crossvalidation performance. Subset sizes are swept to visualize a
feature-dropping curve.
The visualization plots the score relative to each subset and shows
the number of (randomly selected) features needed to achieve a score.
The curve is often shaped like log(1+x). For example, see:
https://www.frontiersin.org/articles/10.3389/fnsys.2014.00102/full
Parameters
----------
estimator : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
feature_sizes: array-like, shape (n_values,)
default: ``np.linspace(0.1,1.0,5)``
Relative or absolute numbers of input features that will be used to
generate the learning curve. If the dtype is float, it is regarded as
a fraction of the maximum number of features, otherwise it is
interpreted as absolute numbers of features.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <https://bit.ly/2MMQAI7>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used to generate feature subsets.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
feature_sizes_ : array, shape = (n_unique_ticks,), dtype int
Numbers of features that have been used to generate the
dropping curve. Note that the number of ticks might be less
than n_ticks because duplicate entries will be removed.
train_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on training sets.
train_scores_mean_ : array, shape (n_ticks,)
Mean training data scores for each training split
train_scores_std_ : array, shape (n_ticks,)
Standard deviation of training data scores for each training split
valid_scores_ : array, shape (n_ticks, n_cv_folds)
Scores on validation set.
valid_scores_mean_ : array, shape (n_ticks,)
Mean scores for each validation split
valid_scores_std_ : array, shape (n_ticks,)
Standard deviation of scores for each validation split
Examples
--------
>>> from yellowbrick.model_selection import DroppingCurve
>>> from sklearn.naive_bayes import GaussianNB
>>> model = DroppingCurve(GaussianNB())
>>> model.fit(X, y)
>>> model.show()
Notes
-----
This visualizer is based on sklearn.model_selection.validation_curve
"""
def __init__(
self,
estimator,
ax=None,
feature_sizes=DEFAULT_FEATURE_SIZES,
groups=None,
logx=False,
cv=None,
scoring=None,
n_jobs=None,
pre_dispatch='all',
random_state=None,
**kwargs
):
# Initialize the model visualizer
super(DroppingCurve, self).__init__(estimator, ax=ax, **kwargs)
# Validate the feature sizes
feature_sizes = np.asarray(feature_sizes)
if feature_sizes.ndim != 1:
raise YellowbrickValueError(
"must specify 1-D array of feature sizes, '{}' is not valid".format(
repr(feature_sizes)
)
)
# Set the metric parameters to be used later
self.feature_sizes = feature_sizes
self.groups = groups
self.logx = logx
self.cv = cv
self.scoring = scoring
self.n_jobs = n_jobs
self.pre_dispatch = pre_dispatch
self.random_state = random_state
def fit(self, X, y=None):
"""
Fits the feature dropping curve with the wrapped model to the specified data.
Draws training and cross-validation score curves and saves the scores to the
estimator.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
# Get feature_sizes in whole numbers
n_features = X.shape[-1]
if np.issubdtype(self.feature_sizes.dtype, np.integer):
if (self.feature_sizes <= 0).all() or (self.feature_sizes >= n_features).all():
raise YellowbrickValueError('Expected feature sizes in [0, n_features]')
self.feature_sizes_ = self.feature_sizes
else:
if (self.feature_sizes <= 0.0).all() or (self.feature_sizes >= 1.0).all():
raise YellowbrickValueError('Expected feature ratio in [0,1]')
self.feature_sizes_ = np.ceil(n_features * self.feature_sizes).astype(int)
# The easiest way to prepend a random-dropout layer is to use
# SelectKBest with a random scoring function.
feature_dropping_pipeline = make_pipeline(
SelectKBest(
score_func=lambda X,y: np.random.default_rng(self.random_state).standard_normal(size=X.shape[-1])
),
self.estimator,
)
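        # SelectKBest keeps the k features with the highest scores, so i.i.d.
        # random scores yield a uniformly random subset of size k; because the
        # generator is re-seeded on every call, the subset is reproducible for a
        # fixed integer random_state.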
# arguments to pass to sk_validation_curve
skvc_kwargs = {
key: self.get_params()[key]
for key in (
"groups",
"cv",
"scoring",
"n_jobs",
"pre_dispatch",
)
}
self.train_scores_, self.valid_scores_ = sk_validation_curve(
feature_dropping_pipeline,
X,
y,
param_name="selectkbest__k",
param_range=self.feature_sizes_,
**skvc_kwargs
)
# compute the mean and standard deviation of the training data
self.train_scores_mean_ = np.mean(self.train_scores_, axis=1)
self.train_scores_std_ = np.std(self.train_scores_, axis=1)
# compute the mean and standard deviation of the validation data
self.valid_scores_mean_ = np.mean(self.valid_scores_, axis=1)
self.valid_scores_std_ = np.std(self.valid_scores_, axis=1)
# draw the curves on the current axes
self.draw()
return self
def draw(self, **kwargs):
"""
Renders the training and validation learning curves.
"""
# Specify the curves to draw and their labels
labels = ("Training Score", "Cross Validation Score")
curves = (
(self.train_scores_mean_, self.train_scores_std_),
(self.valid_scores_mean_, self.valid_scores_std_),
)
# Get the colors for the train and test curves
colors = resolve_colors(n_colors=2)
# Plot the fill betweens first so they are behind the curves.
for idx, (mean, std) in enumerate(curves):
# Plot one standard deviation above and below the mean
self.ax.fill_between(
self.feature_sizes_, mean - std, mean + std, alpha=0.25, color=colors[idx]
)
# Plot the mean curves so they are in front of the variance fill
for idx, (mean, _) in enumerate(curves):
self.ax.plot(
self.feature_sizes_, mean, "o-", color=colors[idx], label=labels[idx]
)
if self.logx:
self.ax.set_xscale("log")
return self.ax
def finalize(self, **kwargs):
"""
Add the title, legend, and other visual final touches to the plot.
"""
# Set the title of the figure
self.set_title("Random-feature dropping curve for {}".format(self.name))
# Add the legend
self.ax.legend(frameon=True, loc="best")
# Set the axis labels
self.ax.set_xlabel("number of features")
self.ax.set_ylabel("score")
##########################################################################
# Quick Method
##########################################################################
def dropping_curve(
estimator,
X,
y,
feature_sizes=DEFAULT_FEATURE_SIZES,
groups=None,
ax=None,
logx=False,
cv=None,
scoring=None,
n_jobs=None,
pre_dispatch='all',
random_state=None,
show=True,
**kwargs
) -> DroppingCurve:
"""
Displays a random-feature dropping curve, comparing feature size to training
and cross validation scores. The dropping curve aims to show how a model
improves with more information.
This helper function wraps the DroppingCurve class for one-off analysis.
Parameters
----------
estimator : a scikit-learn estimator
An object that implements ``fit`` and ``predict``, can be a
classifier, regressor, or clusterer so long as there is also a valid
associated scoring metric.
Note that the object is cloned for each validation.
X : array-like, shape (n_samples, n_features)
Input vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
feature_sizes: array-like, shape (n_values,)
default: ``np.linspace(0.1,1.0,5)``
Relative or absolute numbers of input features that will be used to
generate the learning curve. If the dtype is float, it is regarded as
a fraction of the maximum number of features, otherwise it is
interpreted as absolute numbers of features.
groups : array-like, with shape (n_samples,)
Optional group labels for the samples used while splitting the dataset
into train/test sets.
ax : matplotlib.Axes object, optional
The axes object to plot the figure on.
logx : boolean, optional
If True, plots the x-axis with a logarithmic scale.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
see the scikit-learn
`cross-validation guide <https://bit.ly/2MMQAI7>`_
for more information on the possible strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string or scorer callable object / function with signature
``scorer(estimator, X, y)``. See scikit-learn model evaluation
documentation for names of possible metrics.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
pre_dispatch : integer or string, optional
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The string can
be an expression like '2*n_jobs'.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used to generate feature subsets.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
dc : DroppingCurve
Returns the fitted visualizer.
"""
dc = DroppingCurve(
estimator,
feature_sizes=feature_sizes,
groups=groups,
ax=ax,
logx=logx,
cv=cv,
scoring=scoring,
n_jobs=n_jobs,
pre_dispatch=pre_dispatch,
random_state=random_state,
**kwargs
)
# Fit and show the visualizer
dc.fit(X, y)
if show:
dc.show()
else:
dc.finalize()
return dc | apache-2.0 |
Fireblend/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 352 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
ivankreso/stereo-vision | scripts/egomotion_kitti_eval/old/kitti_eval/plot_kitti_path.py | 1 | 2072 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import math
filepath1 = "/home/kivan/Projects/datasets/KITTI/poses/07.txt"
filepath2 = "/home/kivan/Dropbox/experiment_data/img/07_nodf.txt"
#filepath2 = "/home/kivan/Dropbox/experiment_data/img/07_df.txt"
#filepath2 = "/home/kivan/Dropbox/experiment_data/img/07_wgt.txt"
#filepath1 = "/home/kivan/Projects/cv-stereo/data/GT/Tsukuba/tsukuba_gt_crop.txt"
#filepath2 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/tsukuba_tracker_ncc_best/00.txt"
#filepath2 = "/home/kivan/Projects/cv-stereo/build/vo_batch_debug/release/results/tsukuba_tracker_ncc_best_df/00.txt"
gt_pts = np.array(np.loadtxt(filepath1))
vo_pts = np.array(np.loadtxt(filepath2))
if gt_pts.shape[0] != vo_pts.shape[0]:
print("GT and VO data not the same size\n")
exit(-1)
gt_pts3D = np.zeros((gt_pts.shape[0], 3))
vo_pts3D = np.zeros((vo_pts.shape[0], 3))
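# Editorial note (hedged): KITTI pose files store each pose as a flattened
# 3x4 [R|t] matrix per line, so columns 3, 7 and 11 hold the x, y and z
# translation components extracted in the loop below.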
for i in range(len(vo_pts)):
vo_pts3D[i,0] = vo_pts[i,3]
vo_pts3D[i,1] = vo_pts[i,7]
vo_pts3D[i,2] = vo_pts[i,11]
gt_pts3D[i,0] = gt_pts[i,3]
gt_pts3D[i,1] = gt_pts[i,7]
gt_pts3D[i,2] = gt_pts[i,11]
fig_path = plt.figure(figsize=(14,8))
plt.axes().set_aspect('equal')
plt.plot(gt_pts3D[:,0], gt_pts3D[:,2], color='r', label="GT")
plt.plot(gt_pts3D[::100,0], gt_pts3D[::100,2], marker='.', color='k', ls="")
plt.plot(gt_pts3D[0,0], gt_pts3D[0,2], marker='o', color='r', ls="")
plt.plot(vo_pts3D[:,0], vo_pts3D[:,2], color='b', label="VO")
plt.plot(vo_pts3D[::100,0], vo_pts3D[::100,2], marker='.', color='k', ls='')
plt.plot(vo_pts3D[0,0], vo_pts3D[0,2], marker='o', color='b', ls="")
#for i in range(0,len(vo_pts3D),10):
# #plt.text(vo_pts3D[i,0]+2, vo_pts3D[i,2]+2, str(i), color='b')
# plt.text(gt_pts3D[i,0]+2, gt_pts3D[i,2]+2, str(i), color='r')
plt.xlabel("x (m)", fontsize=26)
plt.ylabel("z (m)", fontsize=26)
plt.legend(loc="upper right", fontsize=30)
#plt.title(filepath1+"\n"+filepath2, fontsize=12)
plt.xlim([-200, 20])
plt.ylim([-100, 130])
fig_path.savefig("plot_path_diff.pdf", bbox_inches='tight')
plt.show()
| bsd-3-clause |
PiscesDream/Ideas | ML/single_ann/single_ann.py | 1 | 9652 | '''
rewrite on 14/09/16:
W is only a vector
'''
from numpy import *
import theano
import theano.tensor as T
import gzip
import cPickle
import numpy
import time
class neuron(object):
def __init__(self, rng, input, n_in = None, n_out = None,
activation = T.nnet.sigmoid, W = None, b = None, shift_b = None, index = -1):
#input can be anything supporting multiply and add
        if n_in is None:
            # NOTE (editorial): the original line read ``n_in = len(fan_in)``
            # but no ``fan_in`` exists in this scope; n_in has to be supplied
            # by the caller.
            raise ValueError('n_in must be specified')
#output can be multi-object
if n_out is None:
n_out = 1
#W is a vector here
if W is None:
W_values = numpy.asarray(rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
if shift_b: lin_output = lin_output + shift_b
self.output = activation(lin_output)
        #for convenience
self.single_out = self.output[0]
self.params = [self.W, self.b]
self.index = index
class neuron_layer(object):
def __init__(self, rng, input, n_in, n_out, activation):
self.input = input
self.neurons = []
self.params = []
for i in range(n_out):
self.neurons.append(neuron(input = input, rng = rng, n_in = n_in, activation = activation))
self.params.extend(self.neurons[-1].params)
self.output = T.concatenate(map(lambda x: x.output, self.neurons), 1)
self.W = T.concatenate(map(lambda x: x.W, self.neurons), 1)
self.b = T.concatenate(map(lambda x: x.b, self.neurons)).flatten()
class shift_bias_layer(object):
def __init__(self, rng, input, n_in, n_out, activation):
self.input = input
self.neurons = []
self.params = []
for i in range(n_out):
if i == 0:
self.neurons.append(neuron(input = input, rng = rng, n_in = n_in, activation = activation))
else:
self.neurons.append(neuron(input = input, rng = rng, n_in = n_in, shift_b = self.neurons[-1].output, activation = activation))
self.params.extend(self.neurons[-1].params)
self.output = T.concatenate(map(lambda x: x.output, self.neurons), 1)
self.W = T.concatenate(map(lambda x: x.W, self.neurons), 1)
self.b = T.concatenate(map(lambda x: x.b, self.neurons)).flatten()
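# Editorial note (hedged): in shift_bias_layer each neuron after the first
# receives the previous neuron's post-activation output as an extra additive
# term (``shift_b``), i.e. out_i = sigmoid(W_i.x + b_i + out_{i-1}), coupling
# the units within a layer; neuron_layer above has no such coupling.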
class neuron_map(object):
def __init__(self, input, n_in, n_out, each_in = 10, hid_size = [10, 40]):
'''
input: the input data
n_out: the output size
each_in: the input size of each neuron
hid_size: [#neurons connected with input,
#hidden neurons]
'''
n_all = sum(hid_size)
neurons = []
        # NOTE (editorial): this class appears unfinished -- neuron() also
        # expects an rng argument that is not passed here, and no layer
        # output is ever assembled.
        for i in range(hid_size[0]):
            neurons.append(neuron(input = input, n_in = n_in, n_out = 1))
        neurons_connection = []
        #create a pseudo connection
        for i in range(hid_size[1]):
            neurons_connection.append(numpy.random.choice(n_all, each_in))
#must create layer by layer,
#cannot use a future-created neuron
class ANN(object):
def __init__(self, n_in, n_out, lmbd = 0.01, hiddens = [10], shifted = True):
x = T.matrix('x')
y = T.ivector('y')
lr = T.scalar('lr')
# rng = numpy.random.RandomState(numpy.random.randint(2 ** 30))
rng = numpy.random.RandomState(32)
params = []
hid_layers = []
L2 = .0
n_hid = hiddens + [n_out]
for ind, ele in enumerate(n_hid):
if ind == 0:
input = x
n_in = n_in
else:
input = hid_layers[-1].output
n_in = n_hid[ind-1]
if ind == len(n_hid) - 1:
activation = T.nnet.softmax
layer = neuron(rng = rng, input = input, n_in = n_in, n_out = ele, activation = activation)
else:
activation = T.nnet.sigmoid
if shifted:
layer = shift_bias_layer(input = input, rng = rng, n_in = n_in, n_out = ele, activation = activation)
else:
layer = neuron_layer(input = input, rng = rng, n_in = n_in, n_out = ele, activation = activation)
hid_layers.append( layer)
L2 += T.sum(layer.W ** 2)
params.extend(layer.params)
nl = -T.mean(T.log(hid_layers[-1].output)[T.arange(y.shape[0]), y])
cost = nl + L2 * lmbd
grads = T.grad(cost, params)
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - lr * grad_i))
y_pred = T.argmax(hid_layers[-1].output, 1)
errors = T.mean(T.neq(y_pred, y))
self.n_in = n_in
self.n_out = n_out
self.hiddens = hiddens
self.hid_layers = hid_layers
self.x = x
self.y = y
self.lr = lr
self.cost = cost
self.errors = errors
self.updates = updates
self.pred = y_pred
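        # NOTE (editorial): assigning the symbolic y_pred tensor to self.pred
        # shadows the pred() method defined further below on instances, so
        # cl.pred is not callable; prob() is unaffected.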
self.time = []
def fit(self, datasets, batch_size = 500, n_epochs = 200, lr = 0.01):
''' without validation'''
index = T.lscalar()
train_set_x, train_set_y = datasets[0]
test_set_x, test_set_y = datasets[1]
n_train_batches = train_set_x.get_value(borrow=True).shape[0]
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_train_batches /= batch_size
n_test_batches /= batch_size
train_model = theano.function([index], self.cost,
updates = self.updates,
givens = {
self.x: train_set_x[index * batch_size: (index + 1) * batch_size],
self.y: train_set_y[index * batch_size: (index + 1) * batch_size],
self.lr: lr})
test_model = theano.function([], self.errors,
givens = {
self.x: test_set_x,
self.y: test_set_y})
debug_f = theano.function([index], self.hid_layers[0].output.shape,
givens = {
self.x: test_set_x[index * batch_size : (index+1) * batch_size],
self.y: test_set_y[index * batch_size : (index+1) * batch_size]})
# print numpy.mean([debug_f(i) for i in xrange(n_test_batches)])
print(test_model())
# raw_input( debug_f(0))
print '...training'
maxiter = n_epochs
iteration = 0
while iteration < maxiter:
start_time = time.time()
iteration += 1
print 'iteration %d' % iteration
for minibatch_index in xrange(n_train_batches):
print '\tL of (%03d/%03d) = %f\r' % (minibatch_index, n_train_batches, train_model(minibatch_index)),
print ''
print 'error = %f' % test_model()
self.time.append(time.time()-start_time)
    # NOTE (editorial): this earlier __repr__ referenced attributes that ANN
    # never defines (nkerns, nhiddens) and is overridden by the __repr__
    # defined below, so it is preserved here only as a comment.
    # def __repr__(self):
    #     return '<CNN: %r; HID: %r>' % (self.nkerns, self.nhiddens)
def pred(self, x):
return theano.function([], T.argmax(self.hid_layers[-1].output, 1),
givens = {self.x: x})()
def prob(self, x):
return theano.function([], self.hid_layers[-1].output,
givens = {self.x: x})()
def __repr__(self):
return '<ANN:%r-%r-%r>' % (self.n_in, self.hiddens, self.n_out)
def load_data(dataset, num = None):
print '... loading data'
f = gzip.open(dataset, 'rb')
train_set, valid_set, test_set = cPickle.load(f)
train_set = (numpy.concatenate([train_set[0], valid_set[0]], 0), numpy.concatenate([train_set[1], valid_set[1]], 0))
f.close()
def shared_dataset(data_xy, borrow=True, num = None):
data_x, data_y = data_xy
if num:
data_x = data_x[:num]
data_y = data_y[:num]
# data_y = boarden(10, data_y)
size = int(data_x.shape[1]**.5)
# data_x = data_x.reshape(data_x.shape[0], -1)
print data_x.shape, data_y.shape
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set, num = num)
# valid_set_x, valid_set_y = shared_dataset(valid_set, num = num)
train_set_x, train_set_y = shared_dataset(train_set, num = num)
rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
if __name__ == '__main__':
theano.config.exception_verbosity='high'
theano.config.on_unused_input='ignore'
datasets = load_data('../../Data/mnist/mnist.pkl.gz')
cl = ANN(28 * 28, 10, hiddens = [20])
cl.fit(datasets, lr = 0.1)
| apache-2.0 |
woobe/h2o | py/testdir_0xdata_only/test_from_hdfs_hosts.py | 2 | 4197 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3,
use_hdfs=True, hdfs_version='cdh3', hdfs_name_node='192.168.1.176')
else:
h2o_hosts.build_cloud_with_hosts(
use_hdfs=True, hdfs_version='cdh3', hdfs_name_node='192.168.1.176')
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_B_hdfs_files(self):
# larger set in my local dir
# fails because classes aren't integers
# "allstate_claim_prediction_train_set.zip",
csvFilenameAll = [
"3G_poker_shuffle",
"TEST-poker1000.csv",
# corrupt zip file?
# "allstate_claim_prediction_train_set.zip",
"and-testing.data",
"arcene2_train.both",
"arcene_train.both",
"bestbuy_test.csv",
"bestbuy_train.csv",
"billion_rows.csv.gz",
"covtype.13x.data",
"covtype.13x.shuffle.data",
"covtype.169x.data",
"covtype.4x.shuffle.data",
"covtype.data",
"covtype4x.shuffle.data",
"hhp.unbalanced.012.1x11.data.gz",
"hhp.unbalanced.012.data.gz",
"hhp.unbalanced.data.gz",
"hhp2.os.noisy.0_1.data",
"hhp2.os.noisy.9_4.data",
"hhp_9_14_12.data",
"leads.csv",
"prostate_long_1G.csv",
]
# pick 8 randomly!
if (1==0):
csvFilenameList = random.sample(csvFilenameAll,8)
        # Alternatively: do the list in order! Note the order goes from easy to hard
else:
csvFilenameList = csvFilenameAll
# pop open a browser on the cloud
h2b.browseTheCloud()
timeoutSecs = 1000
        # save the first, for all comparisons, to avoid slow drift with each iteration
firstglm = {}
for csvFilename in csvFilenameList:
# creates csvFilename.hex from file in hdfs dir
start = time.time()
print 'Parsing', csvFilename
csvPathname = "datasets/" + csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='hdfs',
timeoutSecs=timeoutSecs, retryDelaySecs=1.0)
print csvFilename, '\nparse time (python)', time.time() - start, 'seconds'
print csvFilename, '\nparse time (h2o):', parseResult['response']['time']
### print h2o.dump_json(parseResult['response'])
print "parse result:", parseResult['destination_key']
            # I use this if I want the larger set in my local dir
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
### print h2o.dump_json(inspect)
cols = inspect['cols']
# look for nonzero num_missing_values count in each col
for i, colDict in enumerate(cols):
num_missing_values = colDict['num_missing_values']
if num_missing_values != 0:
### print "%s: col: %d, num_missing_values: %d" % (csvFilename, i, num_missing_values)
pass
### print h2o.dump_json(cols[0])
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
row_size = inspect['row_size']
ptype = inspect['type']
value_size_bytes = inspect['value_size_bytes']
response = inspect['response']
ptime = response['time']
print "num_cols: %s, num_rows: %s, row_size: %s, ptype: %s, \
value_size_bytes: %s, response: %s, time: %s" % \
(num_cols, num_rows, row_size, ptype, value_size_bytes, response, ptime)
# h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
print "\n" + csvFilename
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
sidenver/ConstituentRetrofit | wordsim/wordsimDetail.py | 1 | 3962 | """Usage:
wordsimDetail.py -v <vectorsFile> -d <dataset> [-n <number>]
wordsimDetail.py -h | --help
take a word embedding file and evaluate it on a word similarity
task using Spearman's rho, outputting the n most differently ranked pairs.
"""
import os
import logging
from docopt import docopt
import numpy
from collections import defaultdict
from scipy import linalg, stats
DATA_ROOT = os.path.dirname(os.path.abspath(__file__)) + "/data/en/"
class WordsimDetail:
def __init__(self, dataset):
logging.info("collecting datasets ..")
self.dataset = defaultdict(list)
for line in open(dataset):
self.dataset[dataset.split('/')[-1].split('.')[0]].append([float(w) if i == 2 else w for i, w in enumerate(line.strip().split())])
@staticmethod
def cos(vec1, vec2):
return vec1.dot(vec2)/(linalg.norm(vec1)*linalg.norm(vec2))
@staticmethod
def rho(vec1, vec2):
return stats.stats.spearmanr(vec1, vec2)[0]
@staticmethod
def load_vector(path):
try:
logging.info("loading vector ..")
if path[-3:] == ".gz":
import gzip
f = gzip.open(path, "rb")
else:
f = open(path, "rb")
        except IOError:
print "Oops! No such file. Try again .."
word2vec = {}
for wn, line in enumerate(f):
line = line.lower().strip()
word = line.split()[0]
word2vec[word] = numpy.array(map(float, line.split()[1:]))
logging.info("loaded vector {0} words found ..".format(len(word2vec.keys())))
return word2vec
@staticmethod
def pprint(result, mostDifferent, n_max=None):
from prettytable import PrettyTable
x = PrettyTable(["Dataset", "Found", "Not Found", "Score (rho)"])
x.align["Dataset"] = "l"
for k in sorted(result):
x.add_row([k, result[k][0], result[k][1], result[k][2]])
print x
if n_max:
detail = PrettyTable(["Word1", "Word2", "Pred", "Label", "Diff"])
detail.align["Word1"] = "l"
detail.align["Word2"] = "l"
for dif in mostDifferent[:n_max]:
detail.add_row([dif[2][0], dif[2][1], dif[0], dif[1], dif[0]-dif[1]])
print detail
@staticmethod
def listToRank(input):
indices = list(range(len(input)))
indices.sort(key=lambda x: input[x], reverse=True)
output = [0] * len(indices)
for i, x in enumerate(indices):
output[x] = i
return output
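    # Worked example (editorial, hedged): for scores [0.9, 0.1, 0.5] the
    # indices sorted by descending score are [0, 2, 1], so listToRank returns
    # [0, 2, 1]: item 0 gets rank 0, item 1 rank 2 and item 2 rank 1.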
def rankByDifference(self, wordPairs, pred, label):
rankedPred = self.listToRank(pred)
rankedLabel = self.listToRank(label)
mostDifferent = sorted(zip(rankedPred, rankedLabel, wordPairs), key=lambda x: abs(x[0]-x[1]), reverse=True)
return mostDifferent
def evaluate(self, word_dict):
result = {}
vocab = word_dict.keys()
for file_name, data in self.dataset.items():
pred, label, found, notfound = [], [], 0, 0
wordPairs = []
for datum in data:
if datum[0] in vocab and datum[1] in vocab:
found += 1
pred.append(self.cos(word_dict[datum[0]], word_dict[datum[1]]))
label.append(datum[2])
wordPairs.append((datum[0], datum[1]))
else:
notfound += 1
result[file_name] = (found, notfound, self.rho(label, pred)*100)
mostDifferent = self.rankByDifference(wordPairs, pred, label)
return result, mostDifferent
if __name__ == "__main__":
commandParse = docopt(__doc__)
wordsim = WordsimDetail(commandParse['<dataset>'])
word2vec = wordsim.load_vector(commandParse['<vectorsFile>'])
result, mostDifferent = wordsim.evaluate(word2vec)
    n_max = int(commandParse['<number>']) if commandParse['<number>'] else None
    wordsim.pprint(result, mostDifferent, n_max)
| apache-2.0 |
DistrictDataLabs/yellowbrick | yellowbrick/cluster/silhouette.py | 1 | 12564 | # yellowbrick.cluster.silhouette
# Implements visualizers using the silhouette metric for cluster evaluation.
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Created: Mon Mar 27 10:09:24 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: silhouette.py [57b563b] [email protected] $
"""
Implements visualizers that use the silhouette metric for cluster evaluation.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.ticker as ticker
from sklearn.metrics import silhouette_score, silhouette_samples
from yellowbrick.utils import check_fitted
from yellowbrick.style import resolve_colors
from yellowbrick.cluster.base import ClusteringScoreVisualizer
## Packages for export
__all__ = ["SilhouetteVisualizer", "silhouette_visualizer"]
##########################################################################
## Silhouette Method for K Selection
##########################################################################
class SilhouetteVisualizer(ClusteringScoreVisualizer):
"""
The Silhouette Visualizer displays the silhouette coefficient for each
sample on a per-cluster basis, visually evaluating the density and
separation between clusters. The score is calculated by averaging the
silhouette coefficient for each sample, computed as the difference
between the average intra-cluster distance and the mean nearest-cluster
distance for each sample, normalized by the maximum value. This produces a
score between -1 and +1, where scores near +1 indicate high separation
and scores near -1 indicate that the samples may have been assigned to
the wrong cluster.
In SilhouetteVisualizer plots, clusters with higher scores have wider
silhouettes, but clusters that are less cohesive will fall short of the
average score across all clusters, which is plotted as a vertical dotted
red line.
This is particularly useful for determining cluster imbalance, or for
selecting a value for K by comparing multiple visualizers.
Parameters
----------
estimator : a Scikit-Learn clusterer
Should be an instance of a centroidal clustering algorithm (``KMeans``
or ``MiniBatchKMeans``). If the estimator is not fitted, it is fit when
the visualizer is fitted, unless otherwise specified by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
colors : iterable or string, default: None
A collection of colors to use for each cluster group. If there are
fewer colors than cluster groups, colors will repeat. May also be a
Yellowbrick or matplotlib colormap string.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the
estimator will be fit when the visualizer is fit, otherwise, the
estimator will not be modified. If 'auto' (default), a helper method
will check if the estimator is fitted before fitting it again.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
silhouette_score_ : float
Mean Silhouette Coefficient for all samples. Computed via scikit-learn
`sklearn.metrics.silhouette_score`.
silhouette_samples_ : array, shape = [n_samples]
Silhouette Coefficient for each samples. Computed via scikit-learn
`sklearn.metrics.silhouette_samples`.
n_samples_ : integer
Number of total samples in the dataset (X.shape[0])
n_clusters_ : integer
Number of clusters (e.g. n_clusters or k value) passed to internal
scikit-learn model.
y_tick_pos_ : array of shape (n_clusters,)
The computed center positions of each cluster on the y-axis
Examples
--------
>>> from yellowbrick.cluster import SilhouetteVisualizer
>>> from sklearn.cluster import KMeans
>>> model = SilhouetteVisualizer(KMeans(10))
>>> model.fit(X)
>>> model.show()
"""
def __init__(self, estimator, ax=None, colors=None, is_fitted="auto", **kwargs):
# Initialize the visualizer bases
super(SilhouetteVisualizer, self).__init__(estimator, ax=ax, **kwargs)
# Visual Properties
# Use colors if it is given, otherwise attempt to use colormap which
# which will override colors. If neither is found, default to None.
# The colormap may yet still be found in resolve_colors
self.colors = colors
if "colormap" in kwargs:
self.colors = kwargs["colormap"]
def fit(self, X, y=None, **kwargs):
"""
Fits the model and generates the silhouette visualization.
"""
# TODO: decide to use this method or the score method to draw.
# NOTE: Probably this would be better in score, but the standard score
# is a little different and I'm not sure how it's used.
if not check_fitted(self.estimator, is_fitted_by=self.is_fitted):
# Fit the wrapped estimator
self.estimator.fit(X, y, **kwargs)
# Get the properties of the dataset
self.n_samples_ = X.shape[0]
self.n_clusters_ = self.estimator.n_clusters
# Compute the scores of the cluster
labels = self.estimator.predict(X)
self.silhouette_score_ = silhouette_score(X, labels)
self.silhouette_samples_ = silhouette_samples(X, labels)
# Draw the silhouette figure
self.draw(labels)
# Return the estimator
return self
def draw(self, labels):
"""
Draw the silhouettes for each sample and the average score.
Parameters
----------
labels : array-like
An array with the cluster label for each silhouette sample,
usually computed with ``predict()``. Labels are not stored on the
visualizer so that the figure can be redrawn with new data.
"""
# Track the positions of the lines being drawn
y_lower = 10 # The bottom of the silhouette
# Get the colors from the various properties
color_kwargs = {"n_colors": self.n_clusters_}
if self.colors is None:
color_kwargs["colormap"] = "Set1"
elif isinstance(self.colors, str):
color_kwargs["colormap"] = self.colors
else:
color_kwargs["colors"] = self.colors
colors = resolve_colors(**color_kwargs)
# For each cluster, plot the silhouette scores
self.y_tick_pos_ = []
for idx in range(self.n_clusters_):
            # Collect silhouette scores for samples in the current cluster.
values = self.silhouette_samples_[labels == idx]
values.sort()
# Compute the size of the cluster and find upper limit
size = values.shape[0]
y_upper = y_lower + size
color = colors[idx]
self.ax.fill_betweenx(
np.arange(y_lower, y_upper),
0,
values,
facecolor=color,
edgecolor=color,
alpha=0.5,
)
# Collect the tick position for each cluster
self.y_tick_pos_.append(y_lower + 0.5 * size)
# Compute the new y_lower for next plot
y_lower = y_upper + 10
# The vertical line for average silhouette score of all the values
self.ax.axvline(
x=self.silhouette_score_,
color="red",
linestyle="--",
label="Average Silhouette Score",
)
return self.ax
def finalize(self):
"""
Prepare the figure for rendering by setting the title and adjusting
the limits on the axes, adding labels and a legend.
"""
# Set the title
self.set_title(
("Silhouette Plot of {} Clustering for {} Samples in {} Centers").format(
self.name, self.n_samples_, self.n_clusters_
)
)
# Set the X and Y limits
# The silhouette coefficient can range from -1, 1;
# but here we scale the plot according to our visualizations
# l_xlim and u_xlim are lower and upper limits of the x-axis,
# set according to our calculated max and min score with necessary padding
l_xlim = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
u_xlim = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
self.ax.set_xlim([l_xlim, u_xlim])
# The (n_clusters_+1)*10 is for inserting blank space between
# silhouette plots of individual clusters, to demarcate them clearly.
self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])
# Set the x and y labels
self.ax.set_xlabel("silhouette coefficient values")
self.ax.set_ylabel("cluster label")
# Set the ticks on the axis object.
self.ax.set_yticks(self.y_tick_pos_)
self.ax.set_yticklabels(str(idx) for idx in range(self.n_clusters_))
# Set the ticks at multiples of 0.1
self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))
# Show legend (Average Silhouette Score axis)
self.ax.legend(loc="best")
##########################################################################
## Quick Method
##########################################################################
def silhouette_visualizer(
estimator, X, y=None, ax=None, colors=None, is_fitted="auto", show=True, **kwargs
):
"""Quick Method:
The Silhouette Visualizer displays the silhouette coefficient for each
sample on a per-cluster basis, visually evaluating the density and
separation between clusters. The score is calculated by averaging the
silhouette coefficient for each sample, computed as the difference
between the average intra-cluster distance and the mean nearest-cluster
distance for each sample, normalized by the maximum value. This produces a
score between -1 and +1, where scores near +1 indicate high separation
and scores near -1 indicate that the samples may have been assigned to
the wrong cluster.
Parameters
----------
estimator : a Scikit-Learn clusterer
Should be an instance of a centroidal clustering algorithm (``KMeans``
or ``MiniBatchKMeans``). If the estimator is not fitted, it is fit when
the visualizer is fitted, unless otherwise specified by ``is_fitted``.
X : array-like of shape (n, m)
A matrix or data frame with n instances and m features
y : array-like of shape (n,), optional
A vector or series representing the target for each instance
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
colors : iterable or string, default: None
A collection of colors to use for each cluster group. If there are
fewer colors than cluster groups, colors will repeat. May also be a
Yellowbrick or matplotlib colormap string.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the
estimator will be fit when the visualizer is fit, otherwise, the
estimator will not be modified. If 'auto' (default), a helper method
will check if the estimator is fitted before fitting it again.
show : bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however
you cannot call ``plt.savefig`` from this signature, nor
``clear_figure``. If False, simply calls ``finalize()``
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
viz : SilhouetteVisualizer
The silhouette visualizer, fitted and finalized.
"""
oz = SilhouetteVisualizer(
estimator, ax=ax, colors=colors, is_fitted=is_fitted, **kwargs
)
oz.fit(X, y)
if show:
oz.show()
else:
oz.finalize()
return oz
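# Hedged usage sketch (editorial addition, not part of the library source);
# the KMeans/make_blobs setup below is illustrative only.
#
#     from sklearn.cluster import KMeans
#     from sklearn.datasets import make_blobs
#     from yellowbrick.cluster.silhouette import silhouette_visualizer
#
#     X, _ = make_blobs(n_samples=500, centers=4, random_state=42)
#     viz = silhouette_visualizer(KMeans(n_clusters=4, random_state=42), X,
#                                 show=False)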
| apache-2.0 |
m87/pyEM | loaders.py | 1 | 2098 | import mnist
import numpy as np
from config import *
from sklearn.datasets import fetch_20newsgroups
mnist_init_test = [7,0,1,2,3,4,8,11,18,61]
mnist_init_train = [0,1,2,3,4,5,7,13,15,17]
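# Editorial note (hedged): these index lists appear to select one example of
# each digit class (0-9) from the MNIST test and train sets respectively,
# giving a fixed, class-covering set of seeds when INIT_FIXED is used.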
def mnist_loader(config):
m = mnist.MNIST(path=config.dataset_params[PATH])
m.load_testing()
m.load_training()
t = []
lab = []
ini=[]
if config.dataset_params[SET] == TEST:
t.extend(m.test_images)
lab.extend(m.test_labels)
if config.dataset_init == INIT_FIXED:
for i in mnist_init_test:
ini.append(m.test_images[i])
if config.dataset_params[SET] == TRAIN:
t.extend(m.train_images)
lab.extend(m.train_labels)
if config.dataset_init == INIT_FIXED:
for i in mnist_init_train:
ini.append(m.train_images[i])
if config.dataset_params[SET] == TRAINTEST:
t.extend(m.test_images)
t.extend(m.train_images)
lab.extend(m.test_labels)
lab.extend(m.train_labels)
if config.dataset_init == INIT_FIXED:
for i in mnist_init_test:
ini.append(m.test_images[i])
if config.dataset_init == INIT_RANDOM or config.dataset_init == INIT_FIRST:
ini=t[:config.alg_params[CLUSTERS]]
return t, ini, lab
def covertype_loader(config):
raw = []
inivisited=[]
ini = []
labels=[]
path=config.dataset_params[PATH]
raw = np.load(path+"/data.npy")
raw = raw.astype(np.float)
labels = np.load(path+"/labels.npy")
labels = labels.astype(np.int)
if config.dataset_init == INIT_FIXED:
for it,x in enumerate(labels[1]):
if x not in inivisited:
inivisited.append(x)
ini.append(raw[it])
if len(inivisited) == 7:
break
if config.dataset_init == INIT_RANDOM or config.dataset_init == INIT_FIRST:
ini=raw[:config.alg_params[CLUSTERS]]
return raw, ini, labels
def news_groups_loader(path):
train = fetch_20newsgroups(subset='train')
test = fetch_20newsgroups(subset='test')
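# Editorial note (hedged): news_groups_loader appears unfinished -- it fetches
# the 20 Newsgroups splits but never vectorizes them or returns a
# (data, init, labels) triple like the loaders above. A completion would
# typically vectorize the posts (e.g. with sklearn's TfidfVectorizer) first.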
| mit |
solashirai/edx-platform | lms/djangoapps/course_api/blocks/transformers/__init__.py | 13 | 2322 | """
Course API Block Transformers
"""
from lms.djangoapps.course_blocks.transformers.visibility import VisibilityTransformer
from .student_view import StudentViewTransformer
from .block_counts import BlockCountsTransformer
from .navigation import BlockNavigationTransformer
class SupportedFieldType(object):
"""
Metadata about fields supported by different transformers
"""
def __init__(
self,
block_field_name,
transformer=None,
requested_field_name=None,
serializer_field_name=None,
default_value=None
):
self.transformer = transformer
self.block_field_name = block_field_name
self.requested_field_name = requested_field_name or block_field_name
self.serializer_field_name = serializer_field_name or self.requested_field_name
self.default_value = default_value
# A list of metadata for additional requested fields to be used by the
# `BlockSerializer` class. Each entry provides information on how that field can
# be requested (`requested_field_name`), can be found (`transformer` and
# `block_field_name`), and should be serialized (`serializer_field_name` and
# `default_value`).
SUPPORTED_FIELDS = [
SupportedFieldType('category', requested_field_name='type'),
SupportedFieldType('display_name', default_value=''),
SupportedFieldType('graded'),
SupportedFieldType('format'),
# 'student_view_data'
SupportedFieldType(StudentViewTransformer.STUDENT_VIEW_DATA, StudentViewTransformer),
# 'student_view_multi_device'
SupportedFieldType(StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE, StudentViewTransformer),
# set the block_field_name to None so the entire data for the transformer is serialized
SupportedFieldType(None, BlockCountsTransformer, BlockCountsTransformer.BLOCK_COUNTS),
SupportedFieldType(
BlockNavigationTransformer.BLOCK_NAVIGATION,
BlockNavigationTransformer,
requested_field_name='nav_depth',
serializer_field_name='descendants',
),
# Provide the staff visibility info stored when VisibilityTransformer ran previously
SupportedFieldType(
'merged_visible_to_staff_only',
VisibilityTransformer,
requested_field_name='visible_to_staff_only',
)
]
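# Hedged illustration (editorial addition): a consumer such as a block
# serializer can iterate over SUPPORTED_FIELDS to map requested API field
# names onto the transformer and block field that hold each value, e.g.
#
#     requested = {'type', 'display_name', 'nav_depth'}
#     active = [f for f in SUPPORTED_FIELDS if f.requested_field_name in requested]
#     for field in active:
#         print(field.serializer_field_name, field.transformer, field.block_field_name)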
| agpl-3.0 |
codeaudit/bosen | app/dnn/script/predict.py | 13 | 1559 | #!/usr/bin/env python
"""
This script starts a process locally, using <client-id> <hostfile> as inputs.
"""
import os
from os.path import dirname
from os.path import join
import time
import sys
if len(sys.argv) != 3:
print "usage: %s <client-id> <hostfile>" % sys.argv[0]
sys.exit(1)
# Please set the FULL app dir path here
app_dir = "/home/user/bosen/app/dnn"
client_id = sys.argv[1]
hostfile = sys.argv[2]
proj_dir = dirname(dirname(app_dir))
params = {
"parafile": join(app_dir, "datasets/para_imnet.txt")
, "data_ptt_file": join(app_dir, "datasets/data_ptt_file.txt")
, "model_weight_file": join(app_dir, "datasets/weights.txt")
, "model_bias_file": join(app_dir, "datasets/biases.txt")
}
petuum_params = {
"hostfile": hostfile,
}
prog_name = "DNN_PRED"
prog_path = join(app_dir, "bin", prog_name)
hadoop_path = os.popen('hadoop classpath --glob').read()
env_params = (
"GLOG_logtostderr=true "
"GLOG_v=-1 "
"GLOG_minloglevel=0 "
)
# Get host IPs
with open(hostfile, "r") as f:
hostlines = f.read().splitlines()
host_ips = [line.split()[1] for line in hostlines]
petuum_params["num_clients"] = len(host_ips)
cmd = "killall -q " + prog_name
# os.system is synchronous call.
os.system(cmd)
print "Done killing"
cmd = "export CLASSPATH=`hadoop classpath --glob`:$CLASSPATH; "
cmd += env_params + prog_path
petuum_params["client_id"] = client_id
cmd += "".join([" --%s=%s" % (k,v) for k,v in petuum_params.items()])
cmd += "".join([" --%s=%s" % (k,v) for k,v in params.items()])
print cmd
os.system(cmd)
| bsd-3-clause |
bucricket/projectMASlst | processlst/lndlst_dms.py | 1 | 15505 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 27 14:03:53 2017
@author: mschull
"""
import os
from osgeo import gdal
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
import pandas as pd
from scipy.ndimage import zoom
from .utils import folders,writeArray2Envi,clean
from .landsatTools import landsat_metadata, GeoTIFF
import subprocess
from joblib import Parallel, delayed
import shutil
base = os.getcwd()
Folders = folders(base)
landsat_SR = Folders['landsat_SR']
landsat_temp = Folders['landsat_Temp']
landsat_LST = Folders['landsat_LST']
# global prediction
def perpareDMSinp(metaFN,s_row,s_col,locglob,ext):
# meta = landsat_metadata(os.path.join(landsat_temp,'%s_MTL.txt' % productID))
meta = landsat_metadata(metaFN)
sceneID = meta.LANDSAT_SCENE_ID
blue = os.path.join(landsat_temp,"%s_sr_band2.blue.dat" % sceneID)
green = os.path.join(landsat_temp,"%s_sr_band3.green.dat" % sceneID)
red = os.path.join(landsat_temp,"%s_sr_band4.red.dat" % sceneID)
nir = os.path.join(landsat_temp,"%s_sr_band5.nir.dat" % sceneID)
swir1 = os.path.join(landsat_temp,"%s_sr_band6.swir1.dat" % sceneID)
swir2 = os.path.join(landsat_temp,"%s_sr_band7.swir2.dat" % sceneID)
cloud = os.path.join(landsat_temp,"%s_cfmask.cloud.dat" % sceneID)
sw_res = meta.GRID_CELL_SIZE_REFLECTIVE
ulx = meta.CORNER_UL_PROJECTION_X_PRODUCT-(sw_res*0.5)
uly = meta.CORNER_UL_PROJECTION_Y_PRODUCT+(sw_res*0.5)
if sceneID[2]=="5":
native_Thres = 120
elif sceneID[2]=="7":
native_Thres = 60
else:
native_Thres = 90
nrows = meta.REFLECTIVE_LINES
ncols = meta.REFLECTIVE_SAMPLES
zone = meta.UTM_ZONE
#filestem = os.path.join(landsatLAI,"lndsr_modlai_samples.combined_%s-%s" %(startDate,endDate))
lstFN = os.path.join(landsat_temp,"lndsr.%s.cband6.bin" % sceneID)
sharpendFN = os.path.join(landsat_temp,"%s.%s_sharpened_%d_%d.%s" % (sceneID,locglob,s_row,s_col,ext))
#fn = os.path.join(landsat_temp,"dms_%d_%d.inp" % (s_row,s_col))
fn = "dms_%d_%d.inp" % (s_row,s_col)
file = open(fn, "w")
file.write("# input file for Data Mining Sharpener\n")
file.write("NFILES = 6\n")
file.write("SW_FILE_NAME = %s %s %s %s %s %s\n" % (blue,green,red,nir,swir1,swir2))
file.write("SW_CLOUD_MASK = %s\n" % cloud)
file.write("SW_FILE_TYPE = binary\n")
file.write("SW_CLOUD_TYPE = binary\n")
file.write("SW_NROWS = %d\n" % nrows)
file.write("SW_NCOLS = %d\n" % ncols)
file.write("SW_PIXEL_SIZE = %f\n" % sw_res)
file.write("SW_FILL_VALUE = -9999\n")
file.write("SW_CLOUD_CODE = 1\n")
file.write("SW_DATA_RANGE = -2000, 16000\n")
file.write("SW_UPPER_LEFT_CORNER = %f %f\n" % (ulx,uly))
file.write("SW_PROJECTION_CODE = 1\n")
file.write("SW_PROJECTION_PARAMETERS = 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
file.write("SW_PROJECTION_ZONE = %d\n" % zone)
file.write("SW_PROJECTION_UNIT = 1\n")
file.write("SW_PROJECTION_DATUM = 12\n")
file.write("ORG_TH_FILE_NAME = %s\n" % lstFN)
file.write("ORG_TH_FILE_TYPE = BINARY\n")
file.write("ORG_TH_DATA_RANGE = 230., 370.\n")
file.write("ORG_TH_PIXEL_SIZE = %f\n" % sw_res)
file.write("ORG_NROWS = %d\n" % nrows)
file.write("ORG_NCOLS = %d\n" % ncols)
file.write("RES_TH_PIXEL_SIZE = %f \n" % native_Thres)
file.write("PURE_CV_TH = 0.1\n")
file.write("ZONE_SIZE = 240\n")
file.write("SMOOTH_FLAG = 1\n")
file.write("CUBIST_FILE_STEM = th_samples_%d_%d\n" % (s_row,s_col))
file.write("OUT_FILE = %s\n" % sharpendFN)
file.write("end")
file.close()
def finalDMSinp(metaFN,ext):
# meta = landsat_metadata(os.path.join(landsat_temp,'%s_MTL.txt' % productID))
meta = landsat_metadata(metaFN)
sceneID = meta.LANDSAT_SCENE_ID
blue = os.path.join(landsat_temp,"%s_sr_band2.blue.dat" % sceneID)
green = os.path.join(landsat_temp,"%s_sr_band3.green.dat" % sceneID)
red = os.path.join(landsat_temp,"%s_sr_band4.red.dat" % sceneID)
nir = os.path.join(landsat_temp,"%s_sr_band5.nir.dat" % sceneID)
swir1 = os.path.join(landsat_temp,"%s_sr_band6.swir1.dat" % sceneID)
swir2 = os.path.join(landsat_temp,"%s_sr_band7.swir2.dat" % sceneID)
cloud = os.path.join(landsat_temp,"%s_cfmask.cloud.dat" % sceneID)
#meta = landsat_metadata(os.path.join(landsat_temp,'%s_MTL.txt' % sceneID))
sw_res = meta.GRID_CELL_SIZE_REFLECTIVE
ulx = meta.CORNER_UL_PROJECTION_X_PRODUCT-(sw_res*0.5)
uly = meta.CORNER_UL_PROJECTION_Y_PRODUCT+(sw_res*0.5)
nrows = meta.REFLECTIVE_LINES
ncols = meta.REFLECTIVE_SAMPLES
zone = meta.UTM_ZONE
if sceneID[2]=="5":
native_Thres = 120
elif sceneID[2]=="7":
native_Thres = 60
else:
native_Thres = 90
#filestem = os.path.join(landsatLAI,"lndsr_modlai_samples.combined_%s-%s" %(startDate,endDate))
lstFN = os.path.join(landsat_temp,"lndsr.%s.cband6.bin" % sceneID)
sharpendFN = os.path.join(landsat_temp,"%s.sharpened_band6.%s" % (sceneID,ext))
fn = os.path.join(landsat_temp,"dms.inp")
fn = "dms.inp"
file = open(fn, "w")
file.write("# input file for Data Mining Sharpener\n")
file.write("NFILES = 6\n")
file.write("SW_FILE_NAME = %s %s %s %s %s %s\n" % (blue,green,red,nir,swir1,swir2))
file.write("SW_CLOUD_MASK = %s\n" % cloud)
file.write("SW_FILE_TYPE = binary\n")
file.write("SW_CLOUD_TYPE = binary\n")
file.write("SW_NROWS = %d\n" % nrows)
file.write("SW_NCOLS = %d\n" % ncols)
file.write("SW_PIXEL_SIZE = %f\n" % sw_res)
file.write("SW_FILL_VALUE = -9999\n")
file.write("SW_CLOUD_CODE = 1\n")
file.write("SW_DATA_RANGE = -2000, 16000\n")
file.write("SW_UPPER_LEFT_CORNER = %f %f\n" % (ulx,uly))
file.write("SW_PROJECTION_CODE = 1\n")
file.write("SW_PROJECTION_PARAMETERS = 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
file.write("SW_PROJECTION_ZONE = %d\n" % zone)
file.write("SW_PROJECTION_UNIT = 1\n")
file.write("SW_PROJECTION_DATUM = 12\n")
file.write("ORG_TH_FILE_NAME = %s\n" % lstFN)
file.write("ORG_TH_FILE_TYPE = BINARY\n")
file.write("ORG_TH_DATA_RANGE = 230., 370.\n")
file.write("ORG_TH_PIXEL_SIZE = %f\n" % sw_res)
file.write("ORG_NROWS = %d\n" % nrows)
file.write("ORG_NCOLS = %d\n" % ncols)
file.write("RES_TH_PIXEL_SIZE = %f \n" % native_Thres)
file.write("PURE_CV_TH = 0.1\n")
file.write("ZONE_SIZE = 240\n")
file.write("SMOOTH_FLAG = 1\n")
file.write("CUBIST_FILE_STEM = th_samples\n")
file.write("OUT_FILE = %s\n" % sharpendFN)
file.write("end")
file.close()
def localPred(metaFN,th_res,s_row,s_col):
wsize1 = 200
overlap1 = 20
wsize = int((wsize1*120)/th_res)
overlap = int((overlap1*120)/th_res)
e_row = s_row+wsize
e_col = s_col+wsize
os_row = s_row - overlap
os_col = s_col - overlap
oe_row = e_row +overlap
oe_col = e_col + overlap
perpareDMSinp(metaFN,s_row,s_col,"local","bin")
#dmsfn = os.path.join(landsat_temp,"dms_%d_%d.inp" % (s_row,s_col))
dmsfn = "dms_%d_%d.inp" % (s_row,s_col)
# do cubist prediction
subprocess.call(["get_samples","%s" % dmsfn,"%d" % os_row,"%d" % os_col,
"%d" % oe_row,"%d" % oe_col])
subprocess.call(["cubist","-f", "th_samples_%d_%d" % (s_row,s_col),"-u","-r","15"])
subprocess.call(["predict_fineT","%s" % dmsfn,"%d" % s_row, "%d" % s_col,
"%d" % e_row, "%d" % e_col])
def globalPredSK(metaFN):
meta = landsat_metadata(metaFN)
sceneID = meta.LANDSAT_SCENE_ID
base = os.getcwd()
regr_1 = DecisionTreeRegressor(max_depth=15)
rng = np.random.RandomState(1)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=15),
n_estimators=5, random_state=rng)
fn = os.path.join(base,'th_samples.data')
df = pd.read_csv(fn)
X = np.array(df.iloc[:,3:-4])
w = np.array(df.iloc[:,-1])
w = np.reshape(w,[w.shape[0],1])
X = np.concatenate((X, w), axis=1)
y = np.array(df.iloc[:,-2])
regr_1.fit(X,y)
regr_2.fit(X,y)
blue = os.path.join(landsat_temp,"%s_sr_band2.tif" % sceneID)
green = os.path.join(landsat_temp,"%s_sr_band3.tif" % sceneID)
red = os.path.join(landsat_temp,"%s_sr_band4.tif" % sceneID)
nir = os.path.join(landsat_temp,"%s_sr_band5.tif" % sceneID)
swir1 = os.path.join(landsat_temp,"%s_sr_band6.tif" % sceneID)
swir2 = os.path.join(landsat_temp,"%s_sr_band7.tif" % sceneID)
    # open files and assemble them into 2-d numpy array
Gblue = gdal.Open(blue)
blueData = Gblue.ReadAsArray()
blueVec = np.reshape(blueData,[blueData.shape[0]*blueData.shape[1]])
Ggreen = gdal.Open(green)
greenData = Ggreen.ReadAsArray()
greenVec = np.reshape(greenData,[greenData.shape[0]*greenData.shape[1]])
Gnir = gdal.Open(nir)
nirData = Gnir.ReadAsArray()
nirVec = np.reshape(nirData,[nirData.shape[0]*nirData.shape[1]])
Gred = gdal.Open(red)
redData = Gred.ReadAsArray()
redVec = np.reshape(redData,[redData.shape[0]*redData.shape[1]])
Gswir1 = gdal.Open(swir1)
swir1Data = Gswir1.ReadAsArray()
swir1Vec = np.reshape(swir1Data,[swir1Data.shape[0]*swir1Data.shape[1]])
Gswir2 = gdal.Open(swir2)
swir2Data = Gswir2.ReadAsArray()
swir2Vec = np.reshape(swir2Data,[swir2Data.shape[0]*swir2Data.shape[1]])
ylocs = (np.tile(range(0,blueData.shape[0]),(blueData.shape[1],1)).T)/3
xlocs = (np.tile(range(0,blueData.shape[1]),(blueData.shape[0],1)))/3
pixID = ylocs*10000+xlocs
pixIDvec = np.reshape(pixID,[swir2Data.shape[0]*swir2Data.shape[1]])
newDF = pd.DataFrame({'pixID':pixIDvec,'green':greenVec,'red':redVec,
'nir':nirVec,'swir1':swir1Vec,'swir2':swir2Vec})
#newDF.replace(to_replace=-9999,value=np.)
dnMean = newDF.groupby('pixID').mean()
cv = newDF.groupby('pixID').std()/dnMean
meanCV = np.array(cv.mean(axis=1))
meanCV[np.isinf(meanCV)]=10.
meanCV[np.where(meanCV==0)]=10.
weight = 0.1/meanCV
weight[np.isinf(weight)]=20.
weight[np.where(meanCV<0.01)]=10.
weight[weight==20.]=0.
weight[np.where(weight<0.)]=0.
rows = np.array(dnMean.index/10000)
cols = np.array(dnMean.index-((dnMean.index/10000)*10000))
w_array = np.nan * np.empty((greenData.shape[0]/3,greenData.shape[1]/3))
w_array[list(rows), list(cols)] = list(weight)
w_array2 = zoom(w_array,3.)
weight = np.reshape(w_array2,[greenData.shape[0]*greenData.shape[1]])
newDF['weight']=weight
xNew = np.stack((greenVec,redVec,nirVec,swir1Vec,swir2Vec,weight), axis=-1)
    # Only the boosted ensemble's prediction is used; the plain decision tree
    # (regr_1) is fitted above for reference but its prediction was redundant.
    outData = regr_2.predict(xNew)
return np.reshape(outData,[blueData.shape[0],blueData.shape[1]])
def localPredSK(metaFN,th_res,s_row,s_col):
wsize1 = 200
overlap1 = 20
wsize = int((wsize1*120)/th_res)
overlap = int((overlap1*120)/th_res)
e_row = s_row+wsize
e_col = s_col+wsize
os_row = s_row - overlap
os_col = s_col - overlap
oe_row = e_row +overlap
oe_col = e_col + overlap
perpareDMSinp(metaFN,s_row,s_col,"local","bin")
#dmsfn = os.path.join(landsat_temp,"dms_%d_%d.inp" % (s_row,s_col))
dmsfn = "dms_%d_%d.inp" % (s_row,s_col)
# do cubist prediction
subprocess.call(["get_samples","%s" % dmsfn,"%d" % os_row,"%d" % os_col,
"%d" % oe_row,"%d" % oe_col])
localPred = globalPredSK(metaFN)
subprocess.call(["cubist","-f", "th_samples_%d_%d" % (s_row,s_col),"-u","-r","15"])
subprocess.call(["predict_fineT","%s" % dmsfn,"%d" % s_row, "%d" % s_col,
"%d" % e_row, "%d" % e_col])
return localPred
def getSharpenedLST(metaFN):
meta = landsat_metadata(metaFN)
sceneID =meta.LANDSAT_SCENE_ID
#productID = meta.LANDSAT_PRODUCT_ID
productID = metaFN.split(os.sep)[-1][:-8]
sw_res = meta.GRID_CELL_SIZE_REFLECTIVE
ulx = meta.CORNER_UL_PROJECTION_X_PRODUCT-(sw_res*0.5)
uly = meta.CORNER_UL_PROJECTION_Y_PRODUCT+(sw_res*0.5)
xres = meta.GRID_CELL_SIZE_REFLECTIVE
yres = meta.GRID_CELL_SIZE_REFLECTIVE
ls = GeoTIFF(os.path.join(landsat_temp,'%s_sr_band1.tif' % productID))
th_res = meta.GRID_CELL_SIZE_THERMAL
if sceneID[2]=="5":
th_res = 120
elif sceneID[2]=="7":
th_res = 60
else:
th_res = 90
scale = int(th_res/meta.GRID_CELL_SIZE_REFLECTIVE)
nrows = int(meta.REFLECTIVE_LINES/scale)
ncols = int(meta.REFLECTIVE_SAMPLES/scale)
#dmsfn = os.path.join(landsat_temp,"dms_0_0.inp")
dmsfn = "dms.inp"
# create dms.inp
print("========GLOBAL PREDICTION===========")
finalDMSinp(metaFN,"global")
# do global prediction
subprocess.call(["get_samples","%s" % dmsfn])
model = 'cubist'
if model == 'cubist':
subprocess.call(["cubist","-f", "th_samples","-u","-r","30"])
subprocess.call(["predict_fineT","%s" % dmsfn])
else:
#===========EXPERIMENTAL===========
globFN = os.path.join(landsat_temp,"%s.sharpened_band6.global" % sceneID)
        # NOTE (editorial): globalPredSK() expects the MTL metadata path, not
        # the scene ID, so pass metaFN here.
        globalData = globalPredSK(metaFN)
writeArray2Envi(globalData,ulx,uly,xres,yres,ls.proj4,globFN)
# do local prediction
print("========LOCAL PREDICTION===========")
njobs = -1
wsize1 = 200
wsize = int((wsize1*120)/th_res)
# process local parts in parallel
Parallel(n_jobs=njobs, verbose=5)(delayed(localPred)(metaFN,th_res,s_row,s_col) for s_col in range(0,int(ncols/wsize)*wsize,wsize) for s_row in range(0,int(nrows/wsize)*wsize,wsize))
# put the parts back together
finalFile = os.path.join(landsat_temp,'%s.sharpened_band6.local' % sceneID)
tifFile = os.path.join(landsat_temp,'%s_lstSharp.tiff' % sceneID)
globFN = os.path.join(landsat_temp,"%s.sharpened_band6.global" % sceneID)
Gg = gdal.Open(globFN)
globalData = Gg.ReadAsArray()
for s_col in range(0,int(ncols/wsize)*wsize,wsize):
for s_row in range(0,int(nrows/wsize)*wsize,wsize):
fn = os.path.join(landsat_temp,"%s.local_sharpened_%d_%d.bin" %(sceneID,s_row,s_col))
if os.path.exists(fn):
Lg = gdal.Open(fn)
globalData[0,s_row*scale:s_row*scale+wsize*scale+1,s_col*scale:s_col*scale+wsize*scale+1] = Lg.ReadAsArray(s_col*scale,s_row*scale,wsize*scale+1,wsize*scale+1)[0]
writeArray2Envi(globalData,ulx,uly,xres,yres,ls.proj4,finalFile)
#subprocess.call(["gdal_merge.py", "-o", "%s" % finalFile , "%s" % os.path.join(landsat_temp,'%s.local*' % sceneID)])
# combine the the local and global images
finalDMSinp(metaFN,"bin")
subprocess.call(["combine_models","dms.inp"])
# convert from ENVI to geoTIFF
fn = os.path.join(landsat_temp,"%s.sharpened_band6.bin" % sceneID)
g = gdal.Open(fn)
#=====convert to celcius and scale data======
data = g.ReadAsArray()[1]
data = (data-273.15)*100.
dd = data.astype(np.int16)
ls.clone(tifFile,dd)
# copy files to their proper places
scenePath = os.path.join(landsat_LST,sceneID[3:9])
if not os.path.exists(scenePath):
os.mkdir(scenePath)
shutil.copyfile(tifFile ,os.path.join(scenePath,tifFile.split(os.sep)[-1]))
# cleaning up
clean(landsat_temp,"%s.local_sharpened" % sceneID)
clean(landsat_temp,"%s.sharpened" % sceneID)
clean(base,"th_samples")
clean(base,"dms")
print("DONE SHARPENING")
| bsd-3-clause |
DistrictDataLabs/yellowbrick | tests/test_contrib/test_missing/test_dispersion.py | 1 | 4351 | # tests.test_contrib.test_missing.test_dispersion
# Tests for the missing values dispersion visualizations.
#
# Author: Nathan Danielsen <[email protected]>
# Created: Thu Mar 29 12:13:04 2018 -0500
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_dispersion.py [1443e16] [email protected] $
"""
Tests for the MissingValuesDispersion visualizations.
"""
##########################################################################
## Imports
##########################################################################
import os
import pytest
from sklearn.datasets import make_classification
from tests.base import VisualTestCase
from yellowbrick.contrib.missing.dispersion import *
try:
import pandas as pd
except ImportError:
pd = None
@pytest.fixture(scope="class")
def missing_dispersion_tolerance(request):
request.cls.tol = 0.5 if os.name == "nt" else 0.01
##########################################################################
## Feature Importances Tests
##########################################################################
@pytest.mark.usefixtures("missing_dispersion_tolerance")
class TestMissingValuesDispersion(VisualTestCase):
"""
MissingValuesDispersion visualizer
"""
@pytest.mark.skipif(pd is None, reason="pandas is required")
def test_missingvaluesdispersion_with_pandas(self):
"""
Integration test of visualizer with pandas
"""
X, y = make_classification(
n_samples=400,
n_features=20,
n_informative=8,
n_redundant=8,
n_classes=2,
n_clusters_per_class=4,
random_state=854,
)
# add nan values to a range of values in the matrix
X[X > 1.5] = np.nan
X_ = pd.DataFrame(X)
features = [str(n) for n in range(20)]
viz = MissingValuesDispersion(features=features)
viz.fit(X_)
viz.finalize()
self.assert_images_similar(viz, tol=self.tol)
@pytest.mark.skipif(pd is None, reason="pandas is required")
def test_missingvaluesdispersion_with_pandas_with_y_targets(self):
"""
Integration test of visualizer with pandas with y targets
"""
X, y = make_classification(
n_samples=400,
n_features=20,
n_informative=8,
n_redundant=8,
n_classes=2,
n_clusters_per_class=4,
random_state=854,
)
# add nan values to a range of values in the matrix
X[X > 1.5] = np.nan
X_ = pd.DataFrame(X)
features = [str(n) for n in range(20)]
classes = ["Class A", "Class B"]
viz = MissingValuesDispersion(features=features, classes=classes)
viz.fit(X_, y=y)
viz.finalize()
self.assert_images_similar(viz, tol=self.tol)
def test_missingvaluesdispersion_with_numpy(self):
"""
Integration test of visualizer with numpy
"""
X, y = make_classification(
n_samples=400,
n_features=20,
n_informative=8,
n_redundant=8,
n_classes=2,
n_clusters_per_class=4,
random_state=852,
)
# add nan values to a range of values in the matrix
X[X > 1.5] = np.nan
features = [str(n) for n in range(20)]
viz = MissingValuesDispersion(features=features)
viz.fit(X)
viz.finalize()
self.assert_images_similar(viz, tol=self.tol)
def test_missingvaluesdispersion_with_numpy_with_y_targets(self):
"""
Integration test of visualizer with numpy with y targets
"""
X, y = make_classification(
n_samples=400,
n_features=20,
n_informative=8,
n_redundant=8,
n_classes=2,
n_clusters_per_class=4,
random_state=852,
)
# add nan values to a range of values in the matrix
X[X > 1.5] = np.nan
features = [str(n) for n in range(20)]
classes = ["Class A", "Class B"]
viz = MissingValuesDispersion(features=features, classes=classes)
viz.fit(X, y=y)
viz.finalize()
self.assert_images_similar(viz, tol=self.tol)
| apache-2.0 |
woobe/h2o | py/testdir_0xdata_only/test_hdfs_3_fvec.py | 2 | 3808 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i
# bug with summary (NPE?)
DO_SUMMARY=False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(3, use_hdfs=True, hdfs_version='cdh3', hdfs_name_node='192.168.1.176')
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_hdfs2_3(self):
h2o.beta_features = True
print "\nLoad a list of files from HDFS, parse and do 1 RF tree"
print "\nYou can try running as hduser/hduser if fail"
# larger set in my local dir
# fails because classes aren't integers
# "allstate_claim_prediction_train_set.zip",
csvFilenameAll = [
"covtype.data",
"TEST-poker1000.csv",
"leads.csv",
"and-testing.data",
"arcene2_train.both",
"arcene_train.both",
# these can't RF ..output classes not integer?
# "bestbuy_test.csv",
# "bestbuy_train.csv",
"covtype.4x.shuffle.data",
"covtype4x.shuffle.data",
"covtype.13x.data",
"covtype.13x.shuffle.data",
# "covtype.169x.data",
# "prostate_2g.csv",
# "prostate_long.csv.gz",
"prostate_long_1G.csv",
"hhp.unbalanced.012.1x11.data.gz",
"hhp.unbalanced.012.data.gz",
"hhp.unbalanced.data.gz",
"hhp2.os.noisy.0_1.data",
"hhp2.os.noisy.9_4.data",
"hhp_9_14_12.data",
# "poker_c1s1_testing_refresh.csv",
# "3G_poker_shuffle",
# "billion_rows.csv.gz",
# "poker-hand.1244M.shuffled311M.full.txt",
]
# pick 8 randomly!
if (1==0):
csvFilenameList = random.sample(csvFilenameAll,8)
        # Alternatively: do the list in order! Note the order goes from easy to hard
else:
csvFilenameList = csvFilenameAll
# pop open a browser on the cloud
### h2b.browseTheCloud()
timeoutSecs = 200
        # save the first, for all comparisons, to avoid slow drift with each iteration
importFolderPath = "datasets"
for csvFilename in csvFilenameList:
# creates csvFilename.hex from file in hdfs dir
print "Loading", csvFilename, 'from HDFS'
csvPathname = importFolderPath + "/" + csvFilename
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema="hdfs", timeoutSecs=1000, doSummary=DO_SUMMARY, blocking=1)
print "parse result:", parseResult['destination_key'], 'took', time.time() - start, 'secs'
inspect = h2o_cmd.runInspect(key=parseResult['destination_key'])
## print "inspect:", h2o.dump_json(inspect)
numRows = inspect['numRows']
numCols = inspect['numCols']
print "\n" + csvPathname, \
" numRows:", "{:,}".format(numRows), \
" numCols:", "{:,}".format(numCols)
start = time.time()
modelKey = 'rfmodel.hex'
kwargs = {}
RFview = h2o_cmd.runRF(trees=1, parseResult=parseResult, timeoutSecs=2000, retryDelaySecs=0.5, destination_key=modelKey, **kwargs)
# we should be able to export the model to hdfs
# fails
### e = h2o.nodes[0].export_hdfs(source_key=modelKey, path="/datasets/rfmodel.hex")
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Fireblend/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 304 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
jmargeta/scikit-learn | examples/linear_model/plot_logistic.py | 4 | 1390 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model
# this is our data set; it's just a straight line with some
# gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
pl.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
pl.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
pl.axhline(.5, color='.5')
pl.ylabel('y')
pl.xlabel('X')
pl.xticks(())
pl.yticks(())
pl.ylim(-.25, 1.25)
pl.xlim(-4, 10)
pl.show()
| bsd-3-clause |
nazo/ansible | lib/ansible/modules/cloud/smartos/vmadm.py | 49 | 24634 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jasper Lievisse Adriaanse <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmadm
short_description: Manage SmartOS virtual machines and zones.
description:
- Manage SmartOS virtual machines through vmadm(1M).
version_added: "2.3"
author: Jasper Lievisse Adriaanse (@jasperla)
options:
archive_on_delete:
required: false
description:
- When enabled, the zone dataset will be mounted on C(/zones/archive)
upon removal.
autoboot:
required: false
description:
- Whether or not a VM is booted when the system is rebooted.
brand:
required: true
choices: [ joyent, joyent-minimal, kvm, lx ]
default: joyent
description:
- Type of virtual machine.
boot:
required: false
description:
- Set the boot order for KVM VMs.
cpu_cap:
required: false
description:
- Sets a limit on the amount of CPU time that can be used by a VM.
Use C(0) for no cap.
cpu_shares:
required: false
description:
- Sets a limit on the number of fair share scheduler (FSS) CPU shares for
a VM. This limit is relative to all other VMs on the system.
cpu_type:
required: false
choices: [ qemu64, host ]
default: qemu64
description:
- Control the type of virtual CPU exposed to KVM VMs.
customer_metadata:
required: false
description:
      - Metadata to be set and associated with this VM; this contains
        customer-modifiable keys.
delegate_dataset:
required: false
description:
- Whether to delegate a ZFS dataset to an OS VM.
disk_driver:
required: false
description:
- Default value for a virtual disk model for KVM guests.
disks:
required: false
description:
- A list of disks to add, valid properties are documented in vmadm(1M).
dns_domain:
required: false
description:
- Domain value for C(/etc/hosts).
filesystems:
required: false
description:
- Mount additional filesystems into an OS VM.
firewall_enabled:
required: false
description:
- Enables the firewall, allowing fwadm(1M) rules to be applied.
force:
required: false
description:
- Force a particular action (i.e. stop or delete a VM).
fs_allowed:
required: false
description:
- Comma separated list of filesystem types this zone is allowed to mount.
hostname:
required: false
description:
- Zone/VM hostname.
image_uuid:
required: false
description:
- Image UUID.
indestructible_delegated:
required: false
description:
- Adds an C(@indestructible) snapshot to delegated datasets.
indestructible_zoneroot:
required: false
description:
- Adds an C(@indestructible) snapshot to zoneroot.
internal_metadata:
required: false
description:
      - Metadata to be set and associated with this VM; this contains
        operator-generated keys.
internal_metadata_namespace:
required: false
description:
- List of namespaces to be set as I(internal_metadata-only); these namespaces
will come from I(internal_metadata) rather than I(customer_metadata).
kernel_version:
required: false
description:
- Kernel version to emulate for LX VMs.
limit_priv:
required: false
description:
- Set (comma separated) list of privileges the zone is allowed to use.
maintain_resolvers:
required: false
description:
- Resolvers in C(/etc/resolv.conf) will be updated when updating
the I(resolvers) property.
max_locked_memory:
required: false
description:
- Total amount of memory (in MiBs) on the host that can be locked by this VM.
max_lwps:
required: false
description:
- Maximum number of lightweight processes this VM is allowed to have running.
max_physical_memory:
required: false
description:
- Maximum amount of memory (in MiBs) on the host that the VM is allowed to use.
max_swap:
required: false
description:
- Maximum amount of virtual memory (in MiBs) the VM is allowed to use.
mdata_exec_timeout:
required: false
description:
- Timeout in seconds (or 0 to disable) for the C(svc:/smartdc/mdata:execute) service
that runs user-scripts in the zone.
name:
required: false
aliases: [ alias ]
description:
- Name of the VM. vmadm(1M) uses this as an optional name.
nic_driver:
required: false
description:
- Default value for a virtual NIC model for KVM guests.
nics:
required: false
description:
- A list of nics to add, valid properties are documented in vmadm(1M).
nowait:
required: false
description:
- Consider the provisioning complete when the VM first starts, rather than
when the VM has rebooted.
qemu_opts:
required: false
description:
- Additional qemu arguments for KVM guests. This overwrites the default arguments
provided by vmadm(1M) and should only be used for debugging.
qemu_extra_opts:
required: false
description:
- Additional qemu cmdline arguments for KVM guests.
quota:
required: false
description:
- Quota on zone filesystems (in MiBs).
ram:
required: false
description:
- Amount of virtual RAM for a KVM guest (in MiBs).
resolvers:
required: false
description:
- List of resolvers to be put into C(/etc/resolv.conf).
routes:
required: false
description:
- Dictionary that maps destinations to gateways, these will be set as static
routes in the VM.
spice_opts:
required: false
description:
- Addition options for SPICE-enabled KVM VMs.
spice_password:
required: false
description:
- Password required to connect to SPICE. By default no password is set.
Please note this can be read from the Global Zone.
state:
required: true
choices: [ present, absent, stopped, restarted ]
description:
- States for the VM to be in. Please note that C(present), C(stopped) and C(restarted)
operate on a VM that is currently provisioned. C(present) means that the VM will be
created if it was absent, and that it will be in a running state. C(absent) will
shutdown the zone before removing it.
C(stopped) means the zone will be created if it doesn't exist already, before shutting
it down.
tmpfs:
required: false
description:
- Amount of memory (in MiBs) that will be available in the VM for the C(/tmp) filesystem.
uuid:
required: false
description:
- UUID of the VM. Can either be a full UUID or C(*) for all VMs.
vcpus:
required: false
description:
- Number of virtual CPUs for a KVM guest.
vga:
required: false
description:
- Specify VGA emulation used by KVM VMs.
virtio_txburst:
required: false
description:
- Number of packets that can be sent in a single flush of the tx queue of virtio NICs.
virtio_txtimer:
required: false
description:
- Timeout (in nanoseconds) for the TX timer of virtio NICs.
vnc_password:
required: false
description:
- Password required to connect to VNC. By default no password is set.
Please note this can be read from the Global Zone.
vnc_port:
required: false
description:
      - TCP port for the VNC server to listen on. Set C(0) for a random port,
        or C(-1) to disable VNC.
zfs_data_compression:
required: false
description:
      - Specifies the compression algorithm used for this VM's data dataset. This option
only has effect on delegated datasets.
zfs_data_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the delegated dataset's filesystem.
zfs_filesystem_limit:
required: false
description:
- Maximum number of filesystems the VM can have.
zfs_io_priority:
required: false
description:
- IO throttle priority value relative to other VMs.
zfs_root_compression:
required: false
description:
      - Specifies the compression algorithm used for this VM's root dataset. This option
only has effect on the zoneroot dataset.
zfs_root_recsize:
required: false
description:
- Suggested block size (power of 2) for files in the zoneroot dataset's filesystem.
zfs_snapshot_limit:
required: false
description:
- Number of snapshots the VM can have.
zpool:
required: false
description:
- ZFS pool the VM's zone dataset will be created in.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: create SmartOS zone
vmadm:
brand: joyent
state: present
alias: fw_zone
image_uuid: 95f265b8-96b2-11e6-9597-972f3af4b6d5
firewall_enabled: yes
indestructible_zoneroot: yes
nics:
- nic_tag: admin
ip: dhcp
primary: true
internal_metadata:
root_pw: 'secret'
quota: 1
- name: Delete a zone
vmadm:
alias: test_zone
state: deleted
- name: Stop all zones
vmadm:
uuid: '*'
state: stopped
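# Additional illustrative example (not part of the original module documentation):
- name: Reboot a zone
  vmadm:
    alias: fw_zone
    state: restarted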
'''
RETURN = '''
uuid:
description: UUID of the managed VM.
returned: always
type: string
sample: 'b217ab0b-cf57-efd8-cd85-958d0b80be33'
alias:
description: Alias of the managed VM.
returned: When addressing a VM by alias.
type: string
sample: 'dns-zone'
state:
description: State of the target, after execution.
returned: success
type: string
sample: 'running'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils._text import to_native
import os
import re
import tempfile
import traceback
try:
import json
except ImportError:
import simplejson as json
# While vmadm(1M) supports a -E option to return any errors in JSON, the
# generated JSON does not play well with the JSON parsers of Python.
# The returned message contains '\n' as part of the stacktrace,
# which breaks the parsers.
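# Instead, the lookup helpers below run 'vmadm lookup -j ...' and parse the JSON
# printed on stdout, while stderr is only inspected as plain text (error details
# and 'Successfully ...' messages).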
def get_vm_prop(module, uuid, prop):
# Lookup a property for the given VM.
# Returns the property, or None if not found.
cmd = '{0} lookup -j -o {1} uuid={2}'.format(module.vmadm, prop, uuid)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not perform lookup of {0} on {1}'.format(prop, uuid), exception=stderr)
try:
stdout_json = json.loads(stdout)
except:
e = get_exception()
module.fail_json(
            msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(uuid),
details=to_native(e))
if len(stdout_json) > 0 and prop in stdout_json[0]:
return stdout_json[0][prop]
else:
return None
def get_vm_uuid(module, alias):
# Lookup the uuid that goes with the given alias.
    # Returns the uuid or None if not found.
cmd = '{0} lookup -j -o uuid alias={1}'.format(module.vmadm, alias)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(
msg='Could not retrieve UUID of {0}'.format(alias), exception=stderr)
# If no VM was found matching the given alias, we get back an empty array.
    # That is not an error condition as we might be explicitly checking its
    # absence.
if stdout.strip() == '[]':
return None
else:
try:
stdout_json = json.loads(stdout)
except:
e = get_exception()
module.fail_json(
msg='Invalid JSON returned by vmadm for uuid lookup of {0}'.format(alias),
details=to_native(e))
if len(stdout_json) > 0 and 'uuid' in stdout_json[0]:
return stdout_json[0]['uuid']
def get_all_vm_uuids(module):
# Retrieve the UUIDs for all VMs.
cmd = '{0} lookup -j -o uuid'.format(module.vmadm)
(rc, stdout, stderr) = module.run_command(cmd)
if rc != 0:
module.fail_json(msg='Failed to get VMs list', exception=stderr)
try:
stdout_json = json.loads(stdout)
return [v['uuid'] for v in stdout_json]
except:
e = get_exception()
module.fail_json(msg='Could not retrieve VM UUIDs', details=to_native(e))
def new_vm(module, uuid, vm_state):
payload_file = create_payload(module, uuid)
(rc, stdout, stderr) = vmadm_create_vm(module, payload_file)
if rc != 0:
changed = False
module.fail_json(msg='Could not create VM', exception=stderr)
else:
changed = True
# 'vmadm create' returns all output to stderr...
match = re.match('Successfully created VM (.*)', stderr)
if match:
vm_uuid = match.groups()[0]
if not is_valid_uuid(vm_uuid):
module.fail_json(msg='Invalid UUID for VM {0}?'.format(vm_uuid))
else:
module.fail_json(msg='Could not retrieve UUID of newly created(?) VM')
# Now that the VM is created, ensure it is in the desired state (if not 'running')
if vm_state != 'running':
ret = set_vm_state(module, vm_uuid, vm_state)
if not ret:
module.fail_json(msg='Could not set VM {0} to state {1}'.format(vm_uuid, vm_state))
try:
os.unlink(payload_file)
except Exception as e:
# Since the payload may contain sensitive information, fail hard
# if we cannot remove the file so the operator knows about it.
module.fail_json(
msg='Could not remove temporary JSON payload file {0}'.format(payload_file),
exception=traceback.format_exc())
return changed, vm_uuid
def vmadm_create_vm(module, payload_file):
# Create a new VM using the provided payload.
cmd = '{0} create -f {1}'.format(module.vmadm, payload_file)
return module.run_command(cmd)
def set_vm_state(module, vm_uuid, vm_state):
p = module.params
# Check if the VM is already in the desired state.
state = get_vm_prop(module, vm_uuid, 'state')
if state and (state == vm_state):
return None
# Lookup table for the state to be in, and which command to use for that.
    # vm_state: [vmadm command, forceable?]
cmds = {
'stopped': ['stop', True],
'running': ['start', False],
'deleted': ['delete', True],
'rebooted': ['reboot', False]
}
if p['force'] and cmds[vm_state][1]:
force = '-F'
else:
force = ''
cmd = 'vmadm {0} {1} {2}'.format(cmds[vm_state][0], force, vm_uuid)
(rc, stdout, stderr) = module.run_command(cmd)
match = re.match('^Successfully.*', stderr)
if match:
return True
else:
return False
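# Note: set_vm_state(module, vm_uuid, 'stopped') runs 'vmadm stop [-F] <uuid>'
# and returns True when vmadm reports success, None when the VM is already in
# the requested state, or False otherwise.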
def create_payload(module, uuid):
# Create the JSON payload (vmdef) and return the filename.
p = module.params
# Filter out the few options that are not valid VM properties.
module_options = ['debug', 'force', 'state']
vmattrs = filter(lambda prop: prop not in module_options, p)
vmdef = {}
for attr in vmattrs:
if p[attr]:
vmdef[attr] = p[attr]
try:
vmdef_json = json.dumps(vmdef)
except Exception as e:
module.fail_json(
msg='Could not create valid JSON payload', exception=traceback.format_exc())
    # Create the temporary file that contains our payload, and set tight
    # permissions on it as it may contain sensitive information.
try:
# XXX: When there's a way to get the current ansible temporary directory
# drop the mkstemp call and rely on ANSIBLE_KEEP_REMOTE_FILES to retain
# the payload (thus removing the `save_payload` option).
fname = tempfile.mkstemp()[1]
fh = open(fname, 'w')
os.chmod(fname, 0o400)
fh.write(vmdef_json)
fh.close()
except Exception as e:
module.fail_json(
msg='Could not save JSON payload', exception=traceback.format_exc())
return fname
def vm_state_transition(module, uuid, vm_state):
ret = set_vm_state(module, uuid, vm_state)
# Whether the VM changed state.
if ret is None:
return False
elif ret:
return True
else:
module.fail_json(msg='Failed to set VM {0} to state {1}'.format(uuid, vm_state))
def is_valid_uuid(uuid):
if re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', uuid, re.IGNORECASE):
return True
else:
return False
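# Example: is_valid_uuid('b217ab0b-cf57-efd8-cd85-958d0b80be33') returns True,
# while is_valid_uuid('*') returns False (the '*' wildcard is handled separately
# in validate_uuids() and manage_all_vms()).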
def validate_uuids(module):
# Perform basic UUID validation.
failed = []
for u in [['uuid', module.params['uuid']],
['image_uuid', module.params['image_uuid']]]:
if u[1] and u[1] != '*':
if not is_valid_uuid(u[1]):
failed.append(u[0])
if len(failed) > 0:
module.fail_json(msg='No valid UUID(s) found for: {0}'.format(", ".join(failed)))
def manage_all_vms(module, vm_state):
# Handle operations for all VMs, which can by definition only
# be state transitions.
state = module.params['state']
if state == 'created':
module.fail_json(msg='State "created" is only valid for tasks with a single VM')
# If any of the VMs has a change, the task as a whole has a change.
any_changed = False
# First get all VM uuids and for each check their state, and adjust it if needed.
for uuid in get_all_vm_uuids(module):
current_vm_state = get_vm_prop(module, uuid, 'state')
if not current_vm_state and vm_state == 'deleted':
any_changed = False
else:
if module.check_mode:
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
any_changed = True
else:
any_changed = (vm_state_transition(module, uuid, vm_state) | any_changed)
return any_changed
def main():
# In order to reduce the clutter and boilerplate for trivial options,
# abstract the vmadm properties and build the dict of arguments later.
# Dict of all options that are simple to define based on their type.
# They're not required and have a default of None.
properties = {
'str': [
'boot', 'disk_driver', 'dns_domain', 'fs_allowed', 'hostname',
'image_uuid', 'internal_metadata_namespace', 'kernel_version',
'limit_priv', 'nic_driver', 'qemu_opts', 'qemu_extra_opts',
'spice_opts', 'uuid', 'vga', 'zfs_data_compression',
'zfs_root_compression', 'zpool'
],
'bool': [
'archive_on_delete', 'autoboot', 'debug', 'delegate_dataset',
'firewall_enabled', 'force', 'indestructible_delegated',
'indestructible_zoneroot', 'maintain_resolvers', 'nowait'
],
'int': [
'cpu_cap', 'cpu_shares', 'max_locked_memory', 'max_lwps',
'max_physical_memory', 'max_swap', 'mdata_exec_timeout',
'quota', 'ram', 'tmpfs', 'vcpus', 'virtio_txburst',
'virtio_txtimer', 'vnc_port', 'zfs_data_recsize',
'zfs_filesystem_limit', 'zfs_io_priority', 'zfs_root_recsize',
'zfs_snapshot_limit'
],
'dict': ['customer_metadata', 'internal_metadata', 'routes'],
'list': ['disks', 'nics', 'resolvers', 'filesystems']
}
# Start with the options that are not as trivial as those above.
options = dict(
state=dict(
default='running',
type='str',
choices=['present', 'running', 'absent', 'deleted', 'stopped', 'created', 'restarted', 'rebooted']
),
name=dict(
default=None, type='str',
aliases=['alias']
),
brand=dict(
default='joyent',
type='str',
choices=['joyent', 'joyent-minimal', 'kvm', 'lx']
),
cpu_type=dict(
default='qemu64',
type='str',
choices=['host','qemu64']
),
# Regular strings, however these require additional options.
spice_password=dict(type='str', no_log=True),
vnc_password=dict(type='str', no_log=True),
)
# Add our 'simple' options to options dict.
for type in properties:
for p in properties[type]:
option = dict(default=None, type=type)
options[p] = option
module = AnsibleModule(
argument_spec=options,
supports_check_mode=True,
required_one_of=[['name', 'uuid']]
)
module.vmadm = module.get_bin_path('vmadm', required=True)
p = module.params
uuid = p['uuid']
state = p['state']
    # Translate the state parameter into something we can use later on.
if state in ['present', 'running']:
vm_state = 'running'
elif state in ['stopped', 'created']:
vm_state = 'stopped'
elif state in ['absent', 'deleted']:
vm_state = 'deleted'
elif state in ['restarted', 'rebooted']:
vm_state = 'rebooted'
result = {'state': state}
    # While it's possible to refer to a given VM by its `alias`, it's easier
# to operate on VMs by their UUID. So if we're not given a `uuid`, look
# it up.
if not uuid:
uuid = get_vm_uuid(module, p['name'])
# Bit of a chicken and egg problem here for VMs with state == deleted.
    # If they're going to be removed in this play, we have to look up the
    # uuid. If they're already deleted there's nothing to look up.
    # So if state == deleted and get_vm_uuid() returned None, the VM is already
# deleted and there's nothing else to do.
if uuid is None and vm_state == 'deleted':
result['name'] = p['name']
module.exit_json(**result)
validate_uuids(module)
if p['name']:
result['name'] = p['name']
result['uuid'] = uuid
if uuid == '*':
result['changed'] = manage_all_vms(module, vm_state)
module.exit_json(**result)
# The general flow is as follows:
    # - first the current state of the VM is obtained by its UUID.
# - If the state was not found and the desired state is 'deleted', return.
# - If the state was not found, it means the VM has to be created.
# Subsequently the VM will be set to the desired state (i.e. stopped)
    # - Otherwise, it means the VM exists already and we operate on its
# state (i.e. reboot it.)
#
# In the future it should be possible to query the VM for a particular
# property as a valid state (i.e. queried) so the result can be
# registered.
# Also, VMs should be able to get their properties updated.
# Managing VM snapshots should be part of a standalone module.
# First obtain the VM state to determine what needs to be done with it.
current_vm_state = get_vm_prop(module, uuid, 'state')
# First handle the case where the VM should be deleted and is not present.
if not current_vm_state and vm_state == 'deleted':
result['changed'] = False
elif module.check_mode:
# Shortcut for check mode, if there is no VM yet, it will need to be created.
# Or, if the VM is not in the desired state yet, it needs to transition.
if (not current_vm_state) or (get_vm_prop(module, uuid, 'state') != state):
result['changed'] = True
else:
result['changed'] = False
module.exit_json(**result)
# No VM was found that matched the given ID (alias or uuid), so we create it.
elif not current_vm_state:
result['changed'], result['uuid'] = new_vm(module, uuid, vm_state)
else:
# VM was found, operate on its state directly.
result['changed'] = vm_state_transition(module, uuid, vm_state)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
woobe/h2o | py/testdir_multi_jvm/test_GLM2_covtype_single_cols.py | 2 | 1935 | import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_exec as h2e
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_single_cols(self):
h2o.beta_features = True
timeoutSecs = 120
csvPathname = 'standard/covtype.data'
print "\n" + csvPathname
# columns start at 0
y = 54
ignore_x = ""
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put',
hex_key='A.hex', timeoutSecs=15)
case = 2
execExpr="A.hex[,%s]=(A.hex[,%s]==%s)" % (y+1, y+1, case)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
print "GLM binomial ignoring 1 X column at a time"
print "Result check: abs. value of coefficient and intercept returned are bigger than zero"
for colX in xrange(1,53):
if ignore_x == "":
ignore_x = 'C' + str(colX)
else:
# x = x + "," + str(colX)
ignore_x = 'C' + str(colX)
sys.stdout.write('.')
sys.stdout.flush()
print "y:", y
start = time.time()
kwargs = {'ignored_cols': ignore_x, 'response': y, 'n_folds': 6 }
glm = h2o_cmd.runGLM(parseResult={'destination_key': 'A.hex'}, timeoutSecs=timeoutSecs, **kwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
wuxue/altanalyze | JunctionArray.py | 1 | 107748 | ###JunctionArray
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import unique
import math
import reorder_arrays
import ExonArray
import EnsemblImport
import ExonArrayEnsemblRules
import JunctionArrayEnsemblRules
import export
import RNASeq
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def verifyFile(filename,server_folder):
fn=filepath(filename)
try:
for line in open(fn,'rU').xreadlines():break
except Exception:
import update; reload(update)
if server_folder == None: server_folder = 'AltMouse'
continue_analysis = update.downloadCurrentVersion(filename,server_folder,'')
if continue_analysis == 'no':
print 'The file:\n',filename, '\nis missing and cannot be found online. Please save to the designated directory or contact AltAnalyze support.';sys.exit()
########### Recent code for dealing with comprehensive Affymetrix Junction Arrays
########### Begin Analyses ###########
class ExonAnnotationData:
def Probeset(self): return self._probeset
def ProbesetName(self): return self._psr
def ExonClusterID(self): return self._exon_cluster_id
def setGeneID(self, geneID): self.geneid = geneID
def GeneID(self): return self.geneid
def setTransSplicing(self): self.trans_splicing = 'yes'
def setSecondaryGeneID(self,secondary_geneid): self.secondary_geneid = secondary_geneid
def SecondaryGeneID(self): return self.secondary_geneid
def checkExonPosition(self,exon_pos): return 'left'
def TransSplicing(self): return self.trans_splicing
def EnsemblGeneID(self):
geneid = self._geneid
if 'ENS' in self._geneid:
if ',' in self._geneid:
ens=[]
ids = string.split(self._geneid,',')
for id in ids:
if 'ENS' in id: ens.append(id)
geneid = unique.unique(ens)[-1]
else: geneid=''
return geneid
def EnsemblGeneIDs(self):
geneid = self._geneid
if 'ENS' in self._geneid:
if ',' in self._geneid:
ens=[]
ids = string.split(self._geneid,',')
for id in ids:
if 'ENS' in id: ens.append(id)
geneids = unique.unique(ens)
else: geneids = [self._geneid]
else: geneids=[]
return geneids
def Symbol(self):
try: symbols = string.split(self._symbols,',')
except Exception: symbols = self._symbols
return symbols
def setTranscriptClusterID(self,transcript_cluster): self._transcript_cluster = transcript_cluster
def TranscriptCluster(self):
if self._transcript_cluster[-2:] == '.1':
self._transcript_cluster = self._transcript_cluster[:-2]
return self._transcript_cluster
def setTranscripts(self, transcripts): self.transcripts = transcripts
def EnsemblTranscripts(self): return self.transcripts
def ProbesetType(self):
###e.g. Exon, junction, constitutive(gene)
return self._probeset_type
def setStart(self, start): self.start = start
def setEnd(self, end): self.end = end
def Start(self): return self.start
def End(self): return self.end
def setChromosome(self,chr):
self._chromosome_info = chr
def Chromosome(self):
if len(self._chromosome_info)>0:
try:
null,chr = string.split(self._chromosome_info,'=chr')
chromosome,null=string.split(chr,':')
except Exception: chromosome = self._chromosome_info
if chromosome == 'chrM': chromosome = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if chromosome == 'M': chromosome = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
else: chromosome = 'not-assinged'
return chromosome
def Strand(self):
if self._strand == '-': self._strand = '-1'
else: self._strand = '1'
return self._strand
def ProbesetClass(self):
###e.g. core, extendended, full
#return self._probest_class
return 'core'
def ExternalExonClusterIDs(self): return self._exon_clusters
def ExternalExonClusterIDList(self):
external_exonid_list = string.split(self.ExternalExonClusterIDs(),'|')
return external_exonid_list
def Constitutive(self): return self._constitutive_status
def Sequence(self): return string.lower(self._seq)
def JunctionSequence(self): return string.replace(self.Sequence(),'|','')
def JunctionSequences(self):
try: seq1, seq2 = string.split(self.Sequence(),'|')
except Exception:
seq1 = self.Sequence()[:len(self.Sequence())/2]
seq2 = self.Sequence()[-1*len(self.Sequence())/2:]
return seq1, seq2
def Report(self):
output = self.Probeset()
return output
def __repr__(self): return self.Report()
class PSRAnnotation(ExonAnnotationData):
def __init__(self,psr,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_clusters,constitutive,seq,probeset_type):
self._transcript_cluster = transcript_cluster; self._geneid = geneids; self._exon_clusters = exon_clusters;
self._constitutive_status = constitutive; self._symbols = symbols
self._strand = strand; self._chromosome_info = ucsclink; self._probeset = probeset; self._psr = psr; self._seq = seq
self._probeset_type = probeset_type
class EnsemblInformation:
def __init__(self, chr, strand, gene, symbol, description):
self._chr = chr; self._strand = strand; self._gene = gene; self._description = description
self._symbol = symbol
def GeneID(self): return self._gene
def Chromosome(self):
if self._chr == 'chrM': self._chr = 'chrMT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
if self._chr == 'M': self._chr = 'MT' ### MT is the Ensembl convention whereas M is the Affymetrix and UCSC convention
return self._chr
def Strand(self): return self._strand
def Description(self): return self._description
def Symbol(self): return self._symbol
def __repr__(self): return self.GeneID()
def importEnsemblLiftOverData(filename):
fn=filepath(filename); ens_translation_db={}
print 'importing:',filename
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
tc, new_ens, new_coord = string.split(data,'\t')
ens_translation_db[tc]=new_ens
print len(ens_translation_db), 'Old versus new Ensembl IDs imported (from coordinate liftover and realignment)'
return ens_translation_db
def importJunctionArrayAnnotations(species,array_type,specific_array_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
try: verifyFile(filename,array_type+'/'+specific_array_type) ### Downloads server file if not local
except Exception: null=[]
try: ens_translation_db = importEnsemblLiftOverData(filename)
except Exception: ens_translation_db={}; print "No coordinate LiftOver file present (not supplied for HJAY or MJAY)!!!!"
import EnsemblImport
ens_gene_chr_db = EnsemblImport.importEnsGeneData(species) ### retrieves chromosome and strand info for each gene
ensembl_annotations = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
ensembl_annotation_db = importGeneric(ensembl_annotations)
extraction_type = 'Ensembl'
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
ens_trans_gene_db = importGenericReverse('AltDatabase/ensembl/Hs/Hs_Ensembl_transcript-annotations.txt')
ensembl_symbol_db={}; ensembl_gene_db={}
for ens_geneid in ensembl_annotation_db:
description, symbol = ensembl_annotation_db[ens_geneid]
if ens_geneid in ens_gene_chr_db:
chr,strand = ens_gene_chr_db[ens_geneid]
ei = EnsemblInformation(chr,strand,ens_geneid,symbol,description)
if len(symbol)>0:
try: ensembl_symbol_db[symbol].append(ei)
except KeyError: ensembl_symbol_db[symbol] =[ei]
ensembl_gene_db[ens_geneid] = ei
primary_gene_annotation_export = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
ens_match=0; sym_match=0; ensembl_associations={}; gene_annotation_db={}; missing_count=0
### We want to maximize accurate gene-transcript associations (given the poor state of annotations provided by Affymetrix in these files)
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
try: ens_transcripts = ti.EnsemblTranscripts()
except Exception: ens_transcripts = []
ens_geneids={}; ens_geneid_ls=[]
for gene in ti.EnsemblGeneIDs():
if gene in ens_translation_db and gene not in ensembl_gene_db: ### This is the old lift over method where an old Ens in the annotation file is translated to a more recent ID
gene = ens_translation_db[gene] ### translate the old to new Ensembl
if gene in ensembl_gene_db:
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
if len(ti.EnsemblGeneIDs())>0:
for transcript in ens_transcripts:
try:
gene = ens_trans_gene_db[transcript]
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
except Exception: pass
#if transcript_cluster_id == 'TC01000626.hg.1':
#print ti.EnsemblGeneIDs(), ti.EnsemblTranscripts(); sys.exit()
if transcript_cluster_id in ens_translation_db:
gene = ens_translation_db[transcript_cluster_id] ### translate the TC to new Ensembl
if gene in ensembl_gene_db:
try: ens_geneids[gene]+=1
except Exception: ens_geneids[gene]=1
ens_match+=1
for symbol in ti.Symbol():
if symbol in ensembl_symbol_db:
for ei in ensembl_symbol_db[symbol]:
#print [symbol, ei.GeneID(),ti.Chromosome()]; sys.exit()
#print [ei.Chromosome(),ti.Chromosome(),ei.Strand(),ti.Strand()];kill
if ti.Chromosome() != 'not-assinged': ### Valid for HJAY and MJAY arrays
if ei.Chromosome() == ti.Chromosome() and ei.Strand() == ti.Strand():
try: ens_geneids[ei.GeneID()]+=1
except Exception: ens_geneids[ei.GeneID()]=1
sym_match+=1
else: ### Valid for GLU arrays (since Affymetrix decided to change the file formats and content!!!)
try: ens_geneids[ei.GeneID()]+=1
except Exception: ens_geneids[ei.GeneID()]=1
sym_match+=1
for gene in ens_geneids: ens_geneid_ls.append([ens_geneids[gene],gene]) ### Rank these to get Ensembls that have symbol and ID evidence where possible
ens_geneid_ls.sort(); ens_geneid_ls.reverse()
if len(ens_geneid_ls)>0:
ens_geneid = ens_geneid_ls[0][1] ### Best evidence gene association
try: ensembl_associations[transcript_cluster_id].append(ens_geneid)
except KeyError: ensembl_associations[transcript_cluster_id] = [ens_geneid]
ei = ensembl_gene_db[ens_geneid]
gene_annotation_db[transcript_cluster_id]=[ei.Description(),ens_geneid,ei.Symbol(),'']
else:
missing_count+=1
#if missing_count<20: print transcript_cluster_id,ti.EnsemblGeneIDs(),ti.Symbol()
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
### Add TCs based on genomic overlap positions with Ensembl genes
coordinates_to_annotate={}; added_genes=0
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
if ti.Strand() == '-1': strand = '-'
else: strand = '+'
try: coordinates_to_annotate[ti.Chromosome(),strand].append([(ti.Start(),ti.End()),ti])
except Exception: coordinates_to_annotate[ti.Chromosome(),strand] = [[(ti.Start(),ti.End()),ti]]
import RNASeq
limit = 0
RNASeq.alignCoordinatesToGeneExternal(species,coordinates_to_annotate)
for transcript_cluster_id in tc_ensembl_annotations:
ti = tc_ensembl_annotations[transcript_cluster_id]
if transcript_cluster_id not in gene_annotation_db:
try:
if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID():
gene_annotation_db[transcript_cluster_id]=['',ti.GeneID(),ti.Symbol()[0],'']
try: ensembl_associations[transcript_cluster_id].append(ti.GeneID())
except KeyError: ensembl_associations[transcript_cluster_id] = [ti.GeneID()]
added_genes+=1
except Exception:
if limit < 0:# set to 20 - missing are typically retired Ensembl IDs
print transcript_cluster_id
limit+=1
else:
try:
if 'ENSG' in ti.GeneID() or 'ENSMUSG' in ti.GeneID(): added_genes+=1
except Exception: pass
print added_genes
exportDB(primary_gene_annotation_export,gene_annotation_db)
ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
print ens_match, 'direct Ensembl-Ensembl gene mapping and', sym_match, 'indirect Symbol-chromosome mapping'
print len(tc_ensembl_annotations)-len(ensembl_associations),'unmapped transcript clusters'
print len(gene_annotation_db), 'transcripts with associated valid Ensembl gene IDs'#; sys.exit()
"""
u=0 ### print transcript clusters without gene IDs
for i in tc_ensembl_annotations:
if i not in ensembl_associations:
if u<15:
print i, tc_ensembl_annotations[i].EnsemblGeneID(); u+=1
"""
exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
return ensembl_associations
def pickShortestExonIDDiff(exon_to_exon):
if '|' in exon_to_exon: delim = '|'
else: delim = '///'
if delim not in exon_to_exon:
try: five_exon,three_exon=string.split(exon_to_exon,'_to_')
except Exception: print [exon_to_exon];sys.exit()
return five_exon,three_exon
else:
exon_comps = string.split(exon_to_exon,delim); diff_list=[]
for exon_comp in exon_comps:
five_exon,three_exon=string.split(exon_comp,'_to_')
try: diff=abs(int(five_exon[5:])-int(three_exon[5:]))
except Exception: diff=abs(int(five_exon[4:-3])-int(three_exon[4:-3])) #hta
diff_list.append((diff,[five_exon,three_exon]))
diff_list.sort()
return diff_list[0][1]
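# Example with hypothetical IDs:
#   pickShortestExonIDDiff('PSR170001_to_PSR170005|PSR170004_to_PSR170005')
# returns ['PSR170004', 'PSR170005'], the exon pair whose numeric IDs are closest.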
def importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type):
print 'Importing junction array sequence mapping'
export_dir = 'AltDatabase/'+species+'/'+array_type+'/'
filename = export_dir+string.lower(species[0])+'jay.r2.annotation_map'
if 'lue' in specific_array_type: ### Grab an hGlue specific annotation file
filename = export_dir+string.lower(species[0])+'Glue_3_0_v1.annotation_map_dt.v3.hg18.csv'
elif 'HTA' in specific_array_type:
try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
except Exception:
psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
if extraction_type == 'Ensembl':
filename = export_dir+'HTA-2_0.na33.hg19.transcript.csv'
type = 'TranscriptCluster'
else:
filename = export_dir+'HTA-2_0.na33.hg19.probeset.csv'
#filename = export_dir+'test.csv'
elif 'MTA' in specific_array_type:
try: psr_probeset_db = importGenericReverse(export_dir+'probeset-psr.txt')
except Exception:
psr_probeset_db = importGenericReverse(export_dir+species+'_probeset-psr.txt')
if extraction_type == 'Ensembl':
filename = export_dir+'MTA-1_0.na35.mm10.transcript.csv'
type = 'TranscriptCluster'
else:
filename = export_dir+'MTA-1_0.na35.mm10.probeset.csv'
#filename = export_dir+'test.csv'
verifyFile(filename,array_type) ### Check's to see if it is installed and if not, downloads or throws an error
fn=filepath(filename)
if extraction_type == 'sequence':
probeset_junctionseq_export = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical-junction-seq.txt'
fn2=filepath(probeset_junctionseq_export); dw = open(fn2,'w'); print "Exporting",probeset_junctionseq_export
probeset_translation_db={}; x=0; tc=0; j=0; p=0; k=0; tc_db=(); transcript_cluster_count={}; transcript_cluster_count2={}
global probeset_db; global junction_comp_db; junction_comp_db={}; global junction_alinging_probesets
ps_db={}; jc_db={}; left_ec={}; right_ec={}; psr_ec={}; probeset_db={}; junction_alinging_probesets={}; nonconstitutive_junctions={}
header_row = True; ct=0; probeset_types = {}
for line in open(fn,'r').xreadlines():
#if 'PSR170003198' in line:
if '.csv' in filename:
data = altCleanUpLine(line)
if '"' in data :
t = string.split(data,'"')
new_string = t[0]
for i in t[1:-1]:
if len(i)>1:
if ',' in i[1:-1]: ### can have legitimate commas on the outsides
i = string.replace(i,",",'|')
new_string+=i
new_string+=t[-1]
t = string.split(new_string[:-1],',')
else: t = string.split(data,',')
else:
data = cleanUpLine(line)
t = string.split(data,'\t')
if x<5 or '#' == data[0]: x+=1
elif x>2:
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
if extraction_type != 'Ensembl': type = 'PSR'
### This is the probeset file which has a different structure and up-to-date genomic coordinates (as of hg19)
if header_row:
psr_index = t.index('probeset_id'); si = t.index('strand'); sqi = t.index('seqname')
starti = t.index('start'); endi = t.index('stop')
if type == 'TranscriptCluster':
ai = t.index('mrna_assignment'); gi = t.index('gene_assignment')
else:
pti = t.index('probeset_type'); jse = t.index('junction_start_edge'); jee = t.index('junction_stop_edge')
jsi = t.index('junction_sequence'); tci = t.index('transcript_cluster_id'); xi = t.index('exon_id')
csi = t.index('constituitive')
header_row = False
else:
#probeset_type = t[pti]
#try: probeset_types[probeset_type]+=1
#except Exception: probeset_types[probeset_type]=1
#if probeset_type == 'main':
psr = t[psr_index]
try: probeset = psr_probeset_db[psr]
except Exception: probeset = psr
if type == 'TranscriptCluster':
transcript_annotation = t[ai]; gene_annotation = t[gi]
chr = t[sqi]
strand = t[si]
symbols=[]; ens_transcripts = []; geneids=[]
gene_annotation = string.split(gene_annotation,' /// ')
for ga in gene_annotation:
try: ga = string.split(ga,' // '); symbols = ga[1]
except Exception: pass
if 'ENSG' in transcript_annotation or 'ENSMUSG' in transcript_annotation:
if 'ENSG' in transcript_annotation: delim = 'ENSG'
if 'ENSMUSG' in transcript_annotation: delim = 'ENSMUSG'
try:
ta = string.split(transcript_annotation,delim)[1]
try: ta = string.split(ta,' ')[0]
except Exception: pass
geneids=delim+ta
except Exception: pass
if 'ENST' in transcript_annotation or 'ENSMUST' in transcript_annotation:
if 'ENST' in transcript_annotation: delim = 'ENST'
if 'ENSMUST' in transcript_annotation: delim = 'ENSMUST'
try:
gene_annotation = string.split(transcript_annotation,delim)[1]
try: gene_annotation = string.split(gene_annotation,' ')[0]
except Exception: pass
ens_transcripts = [delim+gene_annotation]
except Exception: pass
#if probeset == 'TC04000084.hg.1':
#print transcript_annotation;sys.exit()
#print probeset, strand, geneids, ens_transcripts, symbols
probeset = probeset[:-2] # remove the .1 or .0 at the end - doesn't match to the probeset annotations
psri = PSRAnnotation(psr,probeset,'',probeset,strand,geneids,symbols,'','','',type)
psri.setChromosome(chr)
try: psri.setStart(int(t[starti]))
except Exception: continue
psri.setEnd(int(t[endi]))
psri.setTranscripts(ens_transcripts)
elif 'JUC' in psr:
type = 'Junction'
exon_cluster = string.split(string.split(t[xi],'///')[0],'_to_') ### grab the first exonIDs
constitutive = t[csi]
transcript_cluster = string.split(t[tci],'///')[0]
chr = t[sqi]; strand = t[si]
if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
try: five_exon,three_exon = pickShortestExonIDDiff(t[xi])
except Exception:
five_exon,three_exon = exon_cluster
five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
junction_alinging_probesets[probeset] = [five_exon,five_exon], [three_exon,three_exon]
seq = t[jsi]
seq = string.lower(string.replace(seq,'|',''))
psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,seq,type)
try: junction_start = int(t[jse]); junction_end = int(t[jee])
except Exception: print t;sys.exit()
if '-' in strand: junction_start, junction_end = junction_end,junction_start
exon1s = junction_start-16; exon1e = junction_start
exon2s = junction_end; exon2e = junction_end+16
if '-' in strand:
junction_start, junction_end = junction_end,junction_start
exon1s = junction_start+16; exon1e = junction_start
exon2s = junction_end; exon2e = junction_end-16
psri.setTranscriptClusterID(transcript_cluster)
psri.setChromosome(chr)
#print chr, transcript_cluster, exon1s, exon2s, seq, five_EC, three_EC;sys.exit()
elif 'PSR' in psr:
type = 'Exon'
exon_cluster = string.split(t[xi],'///')[0] ### grab the first exonIDs
constitutive = t[csi]
transcript_cluster = string.split(t[tci],'///')[0]
chr = t[sqi]; strand = t[si]
if constitutive == 'Non-Constituitive': nonconstitutive_junctions[probeset]=[]
five_EC,three_EC = five_exon,three_exon ### NOT SURE THIS IS CORRECT
psri = PSRAnnotation(psr,probeset,'',transcript_cluster,strand,'','',exon_cluster,constitutive,'',type)
exon_start = int(t[starti]); exon_end = int(t[endi])
if '-' in strand: exon_start, exon_end = exon_end,exon_start
psri.setTranscriptClusterID(transcript_cluster)
psri.setChromosome(chr)
elif len(t)==15: ###Transcript Cluster ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t
type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif 'TC' in t[0]: ###Transcript ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count = t[:15]
type = 'TranscriptCluster'; seq=''; exon_cluster=''; constitutive=''; ucsclink = ''
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==28:###Junction ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, junction_number, original_seq, exon_to_exon, observed_speculative, strand, five_PSR, three_PSR, five_EC, three_EC, Rel_5EC, Rel_3EC, constitutive, blat_junction = t
type = 'Junction'; exon_cluster = [five_EC,three_EC]
if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==31 and len(t[29])>0: ###Junction ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, blat_junction = t
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
type = 'Junction'; exon_cluster = [five_EC,three_EC]; ucsclink = ''
if constitutive == 'alternative': nonconstitutive_junctions[probeset]=[]
five_exon,three_exon = pickShortestExonIDDiff(exon_to_exon)
junction_alinging_probesets[probeset] = [five_PSR,five_exon], [three_PSR,three_exon]; seq = blat_junction
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==24: ###Probeset ID Lines
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, PSR_region, genome_pos2, strand, exon_cluster, constitutive, TR_hits, percent_TR_hits, location_5to3_percent,seq = t
type = 'Exon'
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
elif len(t)==31 and len(t[29])== 0:##Probeset ID Lines - Glue array
probeset, probeset_name, ucsclink, transcript_cluster, genome_pos, strand, transcripts, geneids, symbols, descriptions, TR_count, JUC_count, PSR_count, EXs_count, ECs_count, original_seq, genomic_position, exon_to_exon, observed_speculative, exon_cluster, constitutive, five_PSR, tr_hits, three_PSR, percent_tr_hits, five_EC, loc_5_3, three_EC, Rel_5EC, Rel_3EC, seq = t
if '|' in geneids: geneids = string.replace(geneids,'|',',')
if '|' in symbols: symbols = string.replace(symbols,'|',',')
type = 'Exon'; ucsclink = ''
psri = PSRAnnotation(probeset_name,probeset,ucsclink,transcript_cluster,strand,geneids,symbols,exon_cluster,constitutive,seq,type)
else:
#if k<40 and len(t)>5: print len(t),t; k+=1
type = 'null'
#print len(t),data;sys.exit()
### Exon clusters are equivalent to exon blocks in this schema and can be matched between junctions and exons
#if x < 20: print len(t),t[0],type
store = 'yes'
if extraction_type == 'Ensembl':
if type != 'TranscriptCluster': store = 'no'
elif extraction_type == 'sequence':
store = 'no'
if type == 'Exon' or type == 'Junction':
transcript_cluster_count[psri.TranscriptCluster()]=[]
if psri.TranscriptCluster() in ensembl_associations:
ens_geneid = ensembl_associations[psri.TranscriptCluster()][0]
critical_junctions=''
if type == 'Junction':
dw.write(probeset+'\t'+psri.JunctionSequence()+'\t\t\n')
seq = psri.JunctionSequences()[0]; exon_id = probeset+'|5'
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
try: seq_data.setExonStart(exon1s); seq_data.setExonStop(exon1e) ### HTA
except Exception: pass
seq = psri.JunctionSequences()[1]; exon_id = probeset+'|3'
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: seq_data.setExonStart(exon2s); seq_data.setExonStop(exon2e) ### HTA
except Exception: pass
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
transcript_cluster_count2[psri.TranscriptCluster()]=[]
elif type == 'Exon':
dw.write(probeset+'\t'+psri.Sequence()+'\t\t\n')
seq = psri.Sequence(); exon_id = probeset
seq_data = ExonSeqData(exon_id,psri.TranscriptCluster(),psri.TranscriptCluster()+':'+exon_id,critical_junctions,seq)
try: seq_data.setExonStart(exon_start); seq_data.setExonStop(exon_end) ### HTA
except Exception: pass
try: probeset_db[ens_geneid].append(seq_data)
except Exception: probeset_db[ens_geneid] = [seq_data]
transcript_cluster_count2[psri.TranscriptCluster()]=[]
if store == 'yes':
#if probeset in probeset_db: print probeset; sys.exit()
try: probeset_db[probeset] = psri
except Exception: null=[]
if type == 'TranscriptCluster':
tc+=1
if type == 'Junction':
#print 'here';sys.exit()
j+=1
if extraction_type == 'comparisons':
### Store the left exon-cluster and right exon-cluster for each junction
try: left_ec[five_EC].append(probeset)
except KeyError: left_ec[five_EC]=[probeset]
try: right_ec[three_EC].append(probeset)
except KeyError: right_ec[three_EC]=[probeset]
if type == 'Exon':
p+=1
if extraction_type == 'comparisons':
try: psr_ec[exon_cluster].append(probeset)
except KeyError: psr_ec[exon_cluster]=[probeset]
"""
print 'psid',psid; print 'probeset',probeset; print 'ucsclink',ucsclink
print 'transcript_cluster',transcript_cluster; print 'transcripts',transcripts
print 'geneids',geneids; print 'symbols',symbols; print 'seq',seq; kill"""
x+=1
print 'TCs:',tc, 'Junctions:',j, 'Exons:',p, 'Total:',x; #sys.exit()
#print 'JUC0900017373',probeset_db['JUC0900017373'].Sequence()
#print 'JUC0900017385',probeset_db['JUC0900017385'].Sequence();kill
if extraction_type == 'sequence':
dw.close()
print len(probeset_db),'Entries exported from Junction annotation file'
return probeset_db
if extraction_type == 'Ensembl':
print len(probeset_db),'Entries exported from Junction annotation file'
return probeset_db
if extraction_type == 'comparisons':
global junction_inclusion_db; global ensembl_exon_db; global exon_gene_db
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'original')
ensembl_exon_db,exon_gene_db = JunctionArrayEnsemblRules.importAndReformatEnsemblJunctionAnnotations(species,array_type,nonconstitutive_junctions)
global failed_db; failed_db={}
global passed_db; passed_db={}
print len(junction_inclusion_db)
identifyCompetitiveJunctions(right_ec,"3'")
identifyCompetitiveJunctions(left_ec,"5'")
print 'len(passed_db)',len(passed_db),'len(failed_db)',len(failed_db)
print 'len(junction_inclusion_db)',len(junction_inclusion_db)
exportUpdatedJunctionComps(species,array_type)
def exportUpdatedJunctionComps(species,array_type,searchChr=None):
db_version = unique.getCurrentGeneDatabaseVersion() ### Only need this since we are exporting to root_dir for RNASeq
if array_type == 'RNASeq': species,root_dir=species
else: root_dir = ''
lines_exported=0
if searchChr !=None:
probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/comps/'+ species + '_junction_comps_updated.'+searchChr+'.txt'
else:
probeset_junction_export = root_dir+'AltDatabase/'+db_version+'/'+ species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
if array_type == 'RNASeq':
data,status = RNASeq.AppendOrWrite(probeset_junction_export) ### Creates a new file or appends if already existing (import is chromosome by chromosome)
else:
data = export.ExportFile(probeset_junction_export); status = 'not found'
if array_type != 'RNASeq': print "Exporting",probeset_junction_export
if status == 'not found':
title = 'gene'+'\t'+'critical_exon'+'\t'+'exclusion_junction_region'+'\t'+'inclusion_junction_region'+'\t'+'exclusion_probeset'+'\t'+'inclusion_probeset'+'\t'+'data_source'+'\n'
data.write(title)
for i in junction_inclusion_db:
critical_exons=[]
for ji in junction_inclusion_db[i]:
#value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
### Combine all critical exons for a probeset pair
critical_exons.append(ji.CriticalExon())
critical_exons = unique.unique(critical_exons); critical_exons = string.join(critical_exons,'|'); ji.setCriticalExons(critical_exons); lines_exported+=1
data.write(ji.OutputLine())
data.close()
if array_type != 'RNASeq':
print lines_exported,'for',probeset_junction_export
def identifyCompetitiveJunctions(exon_cluster_db,junction_type):
"""To identify critical exons (e.g., the alternatively spliced exon sequence for two alternative exon-junctions), this script:
1) Finds pairs of junctions that contain the same 5' or 3' exon-cluster (genomic overlapping transcript exons)
    2) Determines which junction has exons that are closest in genomic space, between the pair of junctions (based on exon-cluster ID number or exon ID)
3) Selects the non-common exon and stores the junction sequence for that exon
4) Selects any exon probeset ID that is annotated as overlapping with the critical exon
    The greatest assumption with this method is that the critical exon is chosen based on the numerical ID in the exon-cluster or exon ID (when the exon-clusters
    between the two junctions are the same). For the examples looked at, this appears to be true (e.g., two exons that make up a junction have a difference of 1 in their ID),
    but this may not always be the case. Ideally, this method would be more extensively tested by evaluating junction and exon sequences mapped to genomic coordinates
and AltAnalyze exon block and region coordinates to verify the critical exon selection."""
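    # Illustrative sketch (hypothetical scenario): for two junctions sharing the
    # same 3' exon-cluster, if neither junction maps within a single exon-cluster
    # and junction1 has the smaller Ensembl junction distance, junction1 is treated
    # as the inclusion junction, junction2 as the exclusion junction, and the
    # non-common exon of junction1 supplies the critical exon sequence.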
passed=0; failed=0; already_added=0
if junction_type == "5'": index = 1
else: index = 0
for ec in exon_cluster_db:
if len(exon_cluster_db[ec])>1:
junction_comps={} ### Calculate all possible pairwise-junction comparisons
for junction1 in exon_cluster_db[ec]:
for junction2 in exon_cluster_db[ec]:
if junction1 != junction2: temp = [junction1,junction2]; temp.sort(); junction_comps[tuple(temp)]=[]
for (junction1,junction2) in junction_comps:
store_data = 'no'
if (junction1,junction2) in junction_inclusion_db or (junction2,junction1) in junction_inclusion_db:
already_added+=1
elif junction1 in ensembl_exon_db and junction2 in ensembl_exon_db: ### Thus, these are mapped to the genome
ed1 = ensembl_exon_db[junction1]; ed2 = ensembl_exon_db[junction2]
ensembl_gene_id = ed1.GeneID()
try: diff1 = ed1.JunctionDistance(); diff2 = ed2.JunctionDistance()
except Exception:
print junction1,junction2
psri1 = probeset_db[junction1]
psri2 = probeset_db[junction2]
print psri1.Probeset(), psri2.Probeset()
kill
### Using the ranked exon-cluster IDs
psri1 = probeset_db[junction1]; exon1a = psri1.ExternalExonClusterIDs()[0]; exon1b = psri1.ExternalExonClusterIDs()[-1]
psri2 = probeset_db[junction2]; exon2a = psri2.ExternalExonClusterIDs()[0]; exon2b = psri2.ExternalExonClusterIDs()[-1]
try: diffX1 = abs(int(exon1a[5:])-int(exon1b[5:])); diffX2 = abs(int(exon2a[5:])-int(exon2b[5:]))
except Exception:
diffX1 = abs(int(exon1a[4:-4])-int(exon1b[4:-4])); diffX2 = abs(int(exon2a[4:-4])-int(exon2b[4:-4]))
junction1_exon_id = ed1.ExonID(); junction2_exon_id = ed2.ExonID()
if diffX1==0 or diffX2==0: null=[] ### splicing occurs within a single exon-cluster
elif diff1<diff2: ### Thus the first junction contains the critical exon
#critical_exon_seq = psri1.JunctionSequences()[index] ### if left most exon in junction is common, then choose the most proximal right exon as critical
incl_junction_probeset = junction1; excl_junction_probeset = junction2
incl_junction_id = junction1_exon_id; excl_junction_id = junction2_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction1][index]
store_data = 'yes'
elif diff2<diff1:
incl_junction_probeset = junction2; excl_junction_probeset = junction1
incl_junction_id = junction2_exon_id; excl_junction_id = junction1_exon_id
incl_exon_probeset,incl_exon_id = junction_alinging_probesets[junction2][index]
store_data = 'yes'
if store_data == 'yes':
critical_exon_id = string.split(incl_junction_id,'-')[index]; critical_exon_id = string.replace(critical_exon_id,'.','-')
if incl_exon_probeset in ensembl_exon_db:
if (excl_junction_probeset,incl_exon_probeset) in junction_inclusion_db or (incl_exon_probeset,excl_junction_probeset) in junction_inclusion_db:
already_added+=1
else:
critical_exon_id = ensembl_exon_db[incl_exon_probeset]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,critical_exon_id,excl_junction_probeset,incl_exon_probeset,'Affymetrix')
try: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_exon_probeset] = [ji]
#value = string.join([ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()],'\t')+'\n'
#print ji.OutputLine();kill
#print [[critical_exon_id,junction2,ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]]
passed+=1
passed_db[junction1,junction2]=[]
ji=JunctionArrayEnsemblRules.JunctionInformation(ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset,'Affymetrix')
#print ensembl_gene_id,critical_exon_id,excl_junction_id,incl_junction_id,excl_junction_probeset,incl_junction_probeset;kill
#print [critical_exon_id,junction1,junction2,ed1.ExonID(),ed2.ExonID(), ed1.JunctionCoordinates(), ed2.JunctionCoordinates(), diff1,diff2]
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
print 'already_added:',already_added,'passed:',passed,'failed:',failed
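### Editor's sketch (not part of the original pipeline; the helper name below is hypothetical): the nested
### junction loop above simply enumerates every unordered pair of junctions within an exon cluster,
### which is roughly equivalent to the following itertools-based helper.
def _pairwise_junction_comps_sketch(junctions):
    import itertools
    ### mirrors junction_comps: one empty-list entry per unordered (junction1, junction2) pair
    return dict(((j1, j2), []) for (j1, j2) in itertools.combinations(sorted(junctions), 2))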
def identifyJunctionComps(species,array_type,specific_array_type):
### At this point, probeset-to-exon-region associations are built for exon and junction probesets along with critical exons and reciprocal junctions
### Now, associate the reciprocal junctions/critical exons (Ensembl/UCSC based) with junction array probesets and export to junction Hs_junction_comps.txt
JunctionArrayEnsemblRules.getJunctionComparisonsFromExport(species,array_type)
### Next, do this for reciprocal junctions predicted directly from Affymetrix's annotations
extraction_type = 'comparisons'
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
inferJunctionComps(species,array_type)
def filterForCriticalExons(species,array_type):
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
importForFiltering(species,array_type,filename,'exclude_junction_psrs')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_microRNAs_any.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_probeset_microRNAs_multiple.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
importForFiltering(species,array_type,filename,'include_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp.txt'
importForFiltering(species,array_type,filename,'exclude_critical_exon_ids')
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp.txt'
importForFiltering(species,array_type,filename,'exclude_critical_exon_ids')
def importForFiltering(species,array_type,filename,export_type):
fn=filepath(filename); dbase={}; x = 0
print 'Filtering:',filename
dbase['filename'] = filename
###Import the annotation file to be filtered
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line); splitup = 'no'
if x == 0: x=1; dbase['title'] = line; x+=1 ###Grab the header line
if x !=0:
key = string.split(data,'\t')[0]
if ':' in key:
old_key = key
key = string.split(key,':')[1]
line = string.replace(line,old_key,key)
if '|' in key: ### Get rid of |5 or |3
line = string.replace(line,key,key[:-2])
if export_type == 'exclude_critical_exon_ids': splitup = 'yes'
if splitup == 'no':
try: dbase[key].append(line)
except Exception: dbase[key] = [line]
#print len(dbase)
filterExistingFiles(species,array_type,dbase,export_type)
def importGenericAppend(filename,key_db):
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
key_db[t[0]] = t[1:]
return key_db
def importGenericReverse(filename):
db={}
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
db[t[-1]] = t[0]
return db
def importGenericAppendDBList(filename,key_db):
fn=filepath(filename)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
return key_db
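### Editor's note (illustrative, hypothetical row 'geneA<TAB>ens1<TAB>ens2'): the three generic
### importers above differ only in how they key each tab-delimited row:
###   importGenericAppend       -> key_db['geneA'] = ['ens1', 'ens2']  (one entry per key; later rows overwrite)
###   importGenericAppendDBList -> key_db['geneA'] = ['ens1']          (appends only the second column per row)
###   importGenericReverse      -> db['ens2'] = 'geneA'                (maps the last column back to the first)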
def combineExonJunctionAnnotations(species,array_type):
###Currently used for RNASeq databases to minimize the number of files supplied to the user
collapseSequenceFiles(species,array_type)
overRideJunctionEntriesWithExons(species,array_type)
collapseDomainAlignmentFiles(species,array_type,species+'_Ensembl_domain_aligning_probesets.txt')
collapseDomainAlignmentFiles(species,array_type,species+'_Ensembl_indirect_domain_aligning_probesets.txt')
def collapseDomainAlignmentFiles(species,array_type,filename):
original_filename = 'AltDatabase/'+species+'/'+array_type+'/'+filename
domain_db = importGenericAppendDBList(original_filename,{})
filename = 'AltDatabase/'+species+'/'+array_type+'/junction/'+filename
domain_db = importGenericAppendDBList(filename,domain_db); del domain_db['Probeset']
header = 'Probeset\tInterPro-Description\n'
exportGenericList(domain_db,original_filename,header)
def exportGenericList(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
for i in db[key]: data_export.write(string.join([key]+[i],'\t')+'\n')
data_export.close()
def collapseSequenceFiles(species,array_type):
original_filename = 'AltDatabase/'+species+'/'+array_type+'/SEQUENCE-protein-dbase_exoncomp.txt'
seq_db = importGenericAppend(original_filename,{})
filename = 'AltDatabase/'+species+'/'+array_type+'/exon/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - exon version not found'
filename = 'AltDatabase/'+species+'/'+array_type+'/junction/SEQUENCE-protein-dbase_exoncomp.txt'
try: seq_db = importGenericAppend(filename,seq_db)
except Exception: print 'SEQUENCE-protein-dbase_exoncomp.txt - junction version not found'
exportGeneric(seq_db,original_filename,[])
def exportGeneric(db,filename,header):
data_export = export.ExportFile(filename)
if len(header)>0: data_export.write(header)
print 'Re-writing',filename
for key in db:
data_export.write(string.join([key]+db[key],'\t')+'\n')
data_export.close()
def overRideJunctionEntriesWithExons(species,array_type):
filename1 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-protein-annotations-exoncomp.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-domain-annotations-exoncomp.txt'
overRideExistingEntries(filename1,filename2)
def overRideExonEntriesWithJunctions(species,array_type):
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/'+species+'_Ensembl_domain_aligning_probesets.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_domain_aligning_probesets-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/'+species+'_Ensembl_indirect_domain_aligning_probesets.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_indirect_domain_aligning_probesets-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-protein-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-protein-annotations-exoncomp-filtered.txt'
overRideExistingEntries(filename1,filename2)
filename1 = 'AltDatabase/'+species+'/'+array_type+'/junction/probeset-domain-annotations-exoncomp.txt'
filename2 = 'AltDatabase/'+species+'/'+array_type+'/exon/probeset-domain-annotations-exoncomp-filtered.txt'
overRideExistingEntries(filename1,filename2)
def overRideExistingEntries(file_include,file_exclude):
### Imports two files and over-rides entries in one with another
### These are the filtered entries to replace
fn=filepath(file_include); dbase_include={}; x = 0
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
key = string.split(data,'\t')[0]
try: dbase_include[key].append(line)
except Exception: dbase_include[key] = [line]
x+=1
print x;title=''
fn=filepath(file_exclude); dbase_exclude={}; x = 0
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if x == 0: x=1; title = line; x+=1
if x != 0:
key = string.split(data,'\t')[0]
try: dbase_exclude[key].append(line)
except Exception: dbase_exclude[key] = [line]
x+=1
print x
count=0
for key in dbase_exclude: count+=1
print file_exclude, count
count=0
for key in dbase_include:
dbase_exclude[key] = dbase_include[key]
count+=1
print file_exclude, count
dbase_exclude = eliminate_redundant_dict_values(dbase_exclude)
data_export = export.ExportFile(file_exclude)
count=0
print 'Re-writing',file_exclude,'with junction aligned entries.'
try: data_export.write(title)
except Exception: null=[] ### Occurs when no alternative isoforms present for this genome
for key in dbase_exclude:
for line in dbase_exclude[key]:
data_export.write(line); count+=1
data_export.close()
print count
def clearObjectsFromMemory(db_to_clear):
db_keys={}
for key in db_to_clear: db_keys[key]=[]
for key in db_keys:
try: del db_to_clear[key]
except Exception:
try:
for i in key: del i ### For lists of tuples
except Exception: del key ### For plain lists
class JunctionInformationSimple:
def __init__(self,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset):
self._critical_exon = critical_exon; self.excl_junction = excl_junction; self.incl_junction = incl_junction
self.excl_probeset = excl_probeset; self.incl_probeset = incl_probeset
#self.critical_exon_sets = string.split(critical_exon,'|')
self.critical_exon_sets = [critical_exon]
def CriticalExon(self):
ce = str(self._critical_exon)
if '-' in ce: ce = string.replace(ce,'-','.')
return ce
def CriticalExonList(self):
critical_exon_str = self.CriticalExon()
critical_exons = string.split(critical_exon_str,'|')
return critical_exons
def setCriticalExons(self,critical_exons): self._critical_exon = critical_exons
def setCriticalExonSets(self,critical_exon_sets): self.critical_exon_sets = critical_exon_sets
def setInclusionProbeset(self,incl_probeset): self.incl_probeset = incl_probeset
def setInclusionJunction(self,incl_junction): self.incl_junction = incl_junction
def CriticalExonSets(self): return self.critical_exon_sets ### list of critical exons (can select any or all for functional analysis)
def InclusionJunction(self): return self.incl_junction
def ExclusionJunction(self): return self.excl_junction
def InclusionProbeset(self): return self.incl_probeset
def ExclusionProbeset(self): return self.excl_probeset
def setNovelEvent(self,novel_event): self._novel_event = novel_event
def NovelEvent(self): return self._novel_event
def setInclusionLookup(self,incl_junction_probeset): self.incl_junction_probeset = incl_junction_probeset
def InclusionLookup(self): return self.incl_junction_probeset
def __repr__(self): return self.GeneID()
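### Editor's usage sketch for the container above (all IDs below are hypothetical):
#ji = JunctionInformationSimple('E2.1|E2.2','E1.1-E3.1','E1.1-E2.1','G1:E1.1-E3.1','G1:E1.1-E2.1')
#ji.CriticalExon()       # 'E2.1|E2.2' (any '-' characters would be converted to '.')
#ji.CriticalExonList()   # ['E2.1', 'E2.2']
#ji.ExclusionProbeset()  # 'G1:E1.1-E3.1'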
def getPutativeSpliceEvents(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
exon_inclusion_db={}; comparisons=0
### Previously, JunctionArrayEnsemblRules.reimportJunctionComps (see above) was used for this import -> too memory intensive
if array_type == 'junction': root_dir=''
filename = root_dir+'AltDatabase/' + species + '/'+array_type+'/'+ species + '_junction_comps_updated.txt'
fn=filepath(filename); junction_inclusion_db={}; x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line); junction_info=[]
gene,critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset,source = string.split(data,'\t')
if source == 'AltAnalyze': novel_exon = 'known'
else: novel_exon = 'novel'
"""
if gene == 'ENSG00000140464':
a=0; b=0
if excl_probeset in exon_db: a = 1
if incl_probeset in exon_db: b = 1
#print incl_probeset, a, b, excl_probeset, critical_exon
"""
try:
null=exon_db[excl_probeset] ### Exclusion needs to be present
if incl_probeset in exon_db:
ji = JunctionInformationSimple(critical_exon,excl_junction,incl_junction,excl_probeset,incl_probeset)
junction_info.append(ji)
ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
#print [ji.InclusionProbeset(),ji.ExclusionProbeset()]
if array_type == 'RNASeq':
critical_exons = string.split(critical_exon,'|')
for ce in critical_exons:
critical_exon_probeset = gene+':'+ce
ji=JunctionInformationSimple(ce,excl_junction,ce,excl_probeset,critical_exon_probeset)
junction_info.append(ji); ji.setInclusionLookup(incl_probeset) ### Use this ID to get protein and domain annotations
ji.setNovelEvent(novel_exon) ### Indicates known or novel splicing event
"""
if gene == 'ENSG00000140464' and ce == 'E5.2':
a=0; b=0
if ji.ExclusionProbeset() in exon_db: a = 1
if ji.InclusionProbeset() in exon_db: b = 1
print [ji.InclusionProbeset()],a,b;kill
"""
#print [ji.InclusionProbeset(),ji.ExclusionProbeset()];kill
for ji in junction_info:
try:
geneid=exon_db[ji.InclusionProbeset()].GeneID() ### This inclusion needs to be present
if agglomerate_inclusion_probesets == 'yes':
exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
try: exon_inclusion_db[exclProbeset].append(inclProbeset)
except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
else:
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
comparisons+=1
except KeyError: null=[]
except KeyError: null=[]
#print comparisons, "Junction comparisons in database"
if agglomerate_inclusion_probesets == 'yes':
alt_junction_agglom={}
for excl in exon_inclusion_db:
ji = exon_inclusion_agglom[excl]
ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
critical_exon_sets = unique.unique(critical_exon_db[excl])
incl_probesets = unique.unique(exon_inclusion_db[excl])
exon_inclusion_db[excl] = incl_probesets
critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
ji.setCriticalExons(string.join(critical_exons,'|'))
ji.setInclusionJunction(string.join(incl_junctions,'|'))
ji.setInclusionProbeset(string.join(incl_probesets,'|'))
ji.setCriticalExonSets(critical_exon_sets)
ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Actually needs to be the first entry to match the re-import of a filtered list for exon_db (full, not abbreviated)
#if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
#print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
### Create new agglomerated inclusion probeset entry
#ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
ed1.setDisplayExonID(string.join(incl_junctions,'|'))
exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
#if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
#print ji.InclusionProbeset();sys.exit()
#if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
del exon_inclusion_agglom
critical_exon_db={}
if array_type == 'RNASeq':
### Need to remove the @ from the IDs
for e in exon_inclusion_db:
incl_probesets=[]
for i in exon_inclusion_db[e]:
incl_probesets.append(string.replace(i,'@',':'))
exon_inclusion_db[e] = incl_probesets
#clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
critical_agglomerated=[];exon_inclusion_agglom={}; incl_junctions_agglom={}
""" Not used for junction or RNASeq platforms
if array_type == 'AltMouse':
for probeset in array_id_db:
try:
geneid = exon_db[probeset].GeneID()
exons = exon_db[probeset].ExonID()
exon_dbase[geneid,exons] = probeset
except Exception: null=[]
"""
#print '--------------------------------------------'
### Eliminate redundant entries
objects_to_delete=[]
for geneid in alt_junction_db:
junction_temp_db={}; junction_temp_ls=[]
for ji in alt_junction_db[geneid]: ### Redundant entries can be present
id = ji.ExclusionProbeset(),ji.InclusionProbeset()
if id in junction_temp_db: objects_to_delete.append(ji)
else: junction_temp_db[id]=ji
for i in junction_temp_db:
ji = junction_temp_db[i]; junction_temp_ls.append(ji)
alt_junction_db[geneid]=junction_temp_ls
"""
for ji in alt_junction_db['ENSG00000140464']:
print ji.ExclusionProbeset(), ji.InclusionProbeset(), ji.CriticalExon(), ji.ExclusionJunction(), ji.InclusionJunction()
kill
"""
clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def getPutativeSpliceEventsOriginal(species,array_type,exon_db,agglomerate_inclusion_probesets,root_dir):
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps((species,root_dir),array_type,'updated')
alt_junction_db={}; critical_exon_db={}; critical_agglomerated={}; exon_inclusion_agglom={}; incl_junctions_agglom={}; exon_dbase={}
exon_inclusion_db={}; comparisons=0
for i in junction_inclusion_db:
critical_exons=[]
for ji in junction_inclusion_db[i]:
#ji.GeneID(),ji.CriticalExon(),ji.ExclusionJunction(),ji.InclusionJunction(),ji.ExclusionProbeset(),ji.InclusionProbeset(),ji.DataSource()
if agglomerate_inclusion_probesets == 'yes':
if ji.InclusionProbeset() in exon_db and ji.ExclusionProbeset() in exon_db:
if array_type == 'RNASeq':
exclProbeset = ji.ExclusionProbeset(); inclProbeset=JunctionArrayEnsemblRules.formatID(ji.InclusionProbeset())
else: exclProbeset = ji.ExclusionProbeset(); inclProbeset = ji.InclusionProbeset()
exon_inclusion_agglom[exclProbeset] = ji ### Just need one example
try: critical_exon_db[exclProbeset].append(ji.CriticalExon())
except Exception: critical_exon_db[exclProbeset]=[ji.CriticalExon()]
try: critical_agglomerated[exclProbeset]+=ji.CriticalExonList()
except Exception: critical_agglomerated[exclProbeset]=ji.CriticalExonList()
try: incl_junctions_agglom[exclProbeset].append(ji.InclusionJunction())
except Exception: incl_junctions_agglom[exclProbeset]=[ji.InclusionJunction()]
try: exon_inclusion_db[exclProbeset].append(inclProbeset)
except Exception: exon_inclusion_db[exclProbeset]=[inclProbeset]
else:
try:
geneid = exon_db[ji.InclusionProbeset()].GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
comparisons+=1
except Exception: geneid = ji.GeneID() ### If not in the local user datasets (don't think these genes need to be added)
#print comparisons, "Junction comparisons in database"
if agglomerate_inclusion_probesets == 'yes':
alt_junction_agglom={}
for excl in exon_inclusion_db:
ji = exon_inclusion_agglom[excl]
ed = exon_db[ji.InclusionProbeset()]; ed1 = ed
geneid = ed.GeneID() ### If two genes are present for trans-splicing, over-ride with the one in the database
critical_exon_sets = unique.unique(critical_exon_db[excl])
incl_probesets = unique.unique(exon_inclusion_db[excl])
exon_inclusion_db[excl] = incl_probesets
critical_exons = unique.unique(critical_agglomerated[excl]); critical_exons.sort()
incl_junctions = unique.unique(incl_junctions_agglom[excl]); incl_junctions.sort()
ji.setCriticalExons(string.join(critical_exons,'|'))
ji.setInclusionJunction(string.join(incl_junctions,'|'))
ji.setInclusionProbeset(string.join(incl_probesets,'|'))
ji.setCriticalExonSets(critical_exon_sets)
ed1.setProbeset(string.replace(incl_probesets[0],'@',':')) ### Actually needs to be the first entry to match the re-import of a filtered list for exon_db (full, not abbreviated)
#if '|' in ji.InclusionProbeset(): print ji.InclusionProbeset(), string.replace(incl_probesets[0],'@',':');sys.exit()
#print string.join(incl_probesets,'|'),ji.InclusionProbeset();kill
### Create new agglomerated inclusion probeset entry
#ed1.setProbeset(ji.InclusionProbeset()) ### Agglomerated probesets
ed1.setDisplayExonID(string.join(incl_junctions,'|'))
exon_db[ji.InclusionProbeset()] = ed1 ### Agglomerated probesets
#if 'ENSMUSG00000032497:E23.1-E24.1' in ji.InclusionProbeset():
#print ji.InclusionProbeset();sys.exit()
#if '198878' in ji.InclusionProbeset(): print ji.InclusionProbeset(),excl
try: alt_junction_db[geneid].append(ji)
except Exception: alt_junction_db[geneid] = [ji]
del exon_inclusion_agglom
critical_exon_db={}
if array_type == 'RNASeq':
### Need to remove the @ from the IDs
for e in exon_inclusion_db:
incl_probesets=[]
for i in exon_inclusion_db[e]:
incl_probesets.append(string.replace(i,'@',':'))
exon_inclusion_db[e] = incl_probesets
### Eliminate redundant entries
objects_to_delete=[]
for geneid in alt_junction_db:
junction_temp_db={}; junction_temp_ls=[]
for ji in alt_junction_db[geneid]: ### Redundant entries can be present
id = ji.ExclusionProbeset(),ji.InclusionProbeset()
if id in junction_temp_db: objects_to_delete.append(ji)
else: junction_temp_db[id]=ji
for i in junction_temp_db:
ji = junction_temp_db[i]; junction_temp_ls.append(ji)
alt_junction_db[geneid]=junction_temp_ls
clearObjectsFromMemory(objects_to_delete); objects_to_delete=[]
return alt_junction_db,critical_exon_db,exon_dbase,exon_inclusion_db,exon_db
def filterExistingFiles(species,array_type,db,export_type):
"""Remove probesets entries (including 5' and 3' junction exons) from the database that don't indicate possible critical exons"""
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'probesets',{})
critical_junction_db = {}; critical_probeset_db={}; crit1={}
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,'updated')
for ids in junction_inclusion_db:
for jd in junction_inclusion_db[ids]:
critical_exon_id = jd.ParentCriticalExon()
critical_id = jd.GeneID()+':'+jd.CriticalExon()
critical_exon_ids = string.split(critical_exon_id,'|')
critical_junction_db[jd.ExclusionProbeset(),jd.InclusionProbeset()]=critical_exon_ids,critical_id
crit1[critical_id]=[]
"""
for id in crit1:
if 'ENSMUSG00000066842' in id: print id
stop
"""
#print len(crit1);
crit2={}
for (pX,probeset) in critical_junction_db:
###Keep only junction probesets that contain possible critical exons
p1 = probeset+'|5'; p2 = probeset+'|3'
c1s,critical_id = critical_junction_db[(pX,probeset)]; proceed = 'no'
#print p1, p2, c1s, critical_id
#for probeset in db: print [probeset];kill
if probeset in ensembl_probeset_db and probeset in db:
critical_probeset_db[probeset,critical_id]=db[probeset]
crit2[probeset]=[]
else:
if p1 in ensembl_probeset_db and p1 in db:
c2s = ensembl_probeset_db[p1]; p = p1
c2s = string.split(c2s,'|')
for c1 in c1s:
if c1 in c2s:
critical_probeset_db[p,critical_id]=db[p]
crit2[probeset]=[]
if p2 in ensembl_probeset_db and p2 in db:
c2s = ensembl_probeset_db[p2]; p = p2
c2s = string.split(c2s,'|')
for c1 in c1s:
if c1 in c2s:
critical_probeset_db[p,critical_id]=db[p]
crit2[probeset]=[]
for probeset in ensembl_probeset_db: ### For non-junction probesets
if '|' not in probeset:
if probeset in db: critical_probeset_db[probeset,probeset]=db[probeset]; crit2[probeset]=[]
critical_probeset_db = eliminate_redundant_dict_values(critical_probeset_db)
print len(crit2),'len(crit2)'
x=0
"""
for probeset in db:
if probeset not in crit2:
x+=1
if x<20: print probeset """
print len(critical_probeset_db),': length of filtered db', len(db), ': length of db'
"""
for probeset in ensembl_probeset_db:
###Keep only probesets that contain possible critical exons
if '|' in probeset:
if probeset[:-2] in critical_junction_db and probeset in db:
critical_probeset_db[probeset[:-2]]=db[probeset]
elif probeset in db: critical_probeset_db[probeset]=db[probeset] """
"""
for id in critical_probeset_db:
if 'ENSMUSG00000066842' in id[1]: print id
stop
"""
if export_type == 'exclude_junction_psrs':
critical_probeset_db['title'] = db['title']
critical_probeset_db['filename'] = db['filename']
exportFiltered(critical_probeset_db)
else:
for p in db:
if '|' not in p: probeset = p
else: probeset = p[:-2]
if probeset not in crit2:
### Add back any junction probesets that do not have a critical exon component
critical_probeset_db[probeset,probeset]=db[p]
if export_type == 'exclude_critical_exon_ids':
critical_probeset_db2={}
for (p,cid) in critical_probeset_db:
if ':' in cid or '|' in p:
critical_probeset_db2[p[:-2],p[:-2]] = critical_probeset_db[(p,cid)]
else: critical_probeset_db2[p,p] = critical_probeset_db[(p,cid)]
critical_probeset_db = critical_probeset_db2
critical_probeset_db['title'] = db['title']
critical_probeset_db['filename'] = db['filename']
exportFiltered(critical_probeset_db)
########### Code originally designed for AltMouseA array database builds (adapted for use with Mouse and Human Junction Arrays)
def filterExpressionData(filename1,filename2,pre_filtered_db,constitutive_db):
fn2=filepath(filename2)
probeset_translation_db={}
###Import probeset number/id relationships (note: forced to use numeric IDs for Plier/Exact analysis)
if analysis_method != 'rma':
for line in open(fn2,'r').xreadlines():
data = cleanUpLine(line)
probeset_number,probeset_id = string.split(data,'\t')
probeset_translation_db[probeset_number]=probeset_id
fn=filepath(filename1)
exp_dbase={}; d = 0; x = 0
###Import expression data (non-log space)
try:
for line in open(fn,'r').xreadlines():
data = cleanUpLine(line)
if data[0] != '#' and x == 1: ###Grab expression values
tab_delimited_data = string.split(data,'\t')
z = len(tab_delimited_data)
probeset = tab_delimited_data[0]
if analysis_method == 'rma': exp_vals = tab_delimited_data[1:]
else: exp_vals = convertToLog2(tab_delimited_data[1:])
###Filter results based on whether a sufficient number of samples were detected as Present
if probeset in pre_filtered_db:
if probeset in probeset_translation_db: original_probeset_id = probeset_translation_db[probeset]
else: original_probeset_id = probeset ###When p-values are generated outside of Plier
if original_probeset_id in constitutive_db:
percent_present = pre_filtered_db[probeset]
if percent_present > 0.99: exp_dbase[original_probeset_id] = exp_vals
#else: print percent_present,original_probeset_id; kill
else: exp_dbase[original_probeset_id] = exp_vals
elif data[0] != '#' and x == 0: ###Grab labels
array_names = []
tab_delimited_data = string.split(data,'\t')
for entry in tab_delimited_data: array_names.append(entry)
x += 1
except IOError: exp_dbase = exp_dbase
print len(exp_dbase),"probesets imported with expression values"
###If the arrayid column header is missing, account for this
if len(array_names) == z:
array_names = array_names[1:]
null,filename = string.split(filename1,'\\')
filtered_exp_export = 'R_expression_raw_data\\'+filename[:-4]+'-filtered.txt'
fn=filepath(filtered_exp_export); data = open(fn,'w'); title = 'probeset_id'
for array in array_names: title = title +'\t'+ array
data.write(title+'\n')
for probeset in exp_dbase:
exp_vals = probeset
for exp_val in exp_dbase[probeset]:
exp_vals = exp_vals +'\t'+ str(exp_val)
data.write(exp_vals+'\n')
data.close()
#return filtered_exp_export
def convertToLog2(data_list):
new_list=[]
for item in data_list:
new_list.append(math.log(float(item)+1,2))
return new_list
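### Editor's sketch of the transform above: each value v becomes log2(v+1), so the hypothetical
### input ['0','1','1023'] maps to [0.0, 1.0, 10.0].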
def getAnnotations(filename,p,Species,Analysis_Method,constitutive_db):
global species; species = Species
global analysis_method; analysis_method = Analysis_Method
array_type = 'AltMouse'
filtered_junctions_list = ExonArray.getFilteredExons(filename,p)
probe_id_translation_file = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-probeset_translation.txt'
filtered_exp_export_file = filterExpressionData(filename,probe_id_translation_file,filtered_junctions_list,constitutive_db)
return filtered_exp_export_file
def altCleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
return data
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
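### Editor's note: cleanUpLine() removes newline/carriage-return characters, the literal two-character
### sequence '\c' and any double quotes, e.g. 'probeset_1\t"E2.1"\r\n' -> 'probeset_1\tE2.1';
### altCleanUpLine() above is identical except that double quotes are retained.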
def importGeneric(filename):
verifyFile(filename,None)
fn=filepath(filename); key_db = {}; x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if len(t[1:]) == 1:
try: key_db[t[0]].append(t[1])
except KeyError: key_db[t[0]] = [t[1]]
else: key_db[t[0]] = t[1:]
return key_db
def eliminate_redundant_dict_values(database):
db1={}
for key in database:
list = unique.unique(database[key])
list.sort()
db1[key] = list
return db1
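### Editor's sketch (hypothetical values): duplicate values are removed and the survivors sorted,
### e.g. {'G1': ['E2.1', 'E1.1', 'E2.1']} becomes {'G1': ['E1.1', 'E2.1']}.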
def importAnnotateCriticalExonSequences(species,array_type):
ensembl_associations = importArrayAnnotations(species,array_type)
filename = 'AltDatabase/'+species+'/'+array_type+'/'+ array_type+'_critical-exon-seq.txt'
critical_exon_seq_db = importCriticalExonSeq(filename,array_type,ensembl_associations)
return critical_exon_seq_db
def importArrayAnnotations(species,array_type):
primary_gene_annotation_file = 'AltDatabase/'+species +'/'+ array_type +'/'+ array_type+ '_gene_annotations.txt'
ensembl_array_gene_annotation_file = 'AltDatabase/'+species+'/'+ array_type + '/'+array_type+ '-Ensembl.txt'
ensembl_annotations = 'AltDatabase/ensembl/'+ species + '/'+species+ '_Ensembl-annotations_simple.txt'
verifyFile(primary_gene_annotation_file,array_type)
verifyFile(ensembl_array_gene_annotation_file,array_type)
verifyFile(ensembl_annotations,array_type)
array_gene_annotations = importGeneric(primary_gene_annotation_file)
ensembl_associations = importGeneric(ensembl_array_gene_annotation_file)
ensembl_annotation_db = importGeneric(ensembl_annotations)
ensembl_symbol_db={}
for ens_geneid in ensembl_annotation_db:
description, symbol = ensembl_annotation_db[ens_geneid]
#print symbol;klll
if len(symbol)>0:
try: ensembl_symbol_db[symbol].append(ens_geneid)
except KeyError: ensembl_symbol_db[symbol] =[ens_geneid]
### Update array Ensembl annotations
for array_geneid in array_gene_annotations:
t = array_gene_annotations[array_geneid]; description=t[0];entrez=t[1];symbol=t[2]
if symbol in ensembl_symbol_db:
ens_geneids = ensembl_symbol_db[symbol]
for ens_geneid in ens_geneids:
try: ensembl_associations[array_geneid].append(ens_geneid)
except KeyError: ensembl_associations[array_geneid] = [ens_geneid]
ensembl_associations = eliminate_redundant_dict_values(ensembl_associations)
exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type) ###Use these For LinkEST program
return ensembl_associations
def exportDB(filename,db):
fn=filepath(filename); data = open(fn,'w')
for key in db:
try: values = string.join([key]+db[key],'\t')+'\n'; data.write(values)
except Exception: print key,db[key];sys.exit()
data.close()
def exportFiltered(db):
filename = db['filename']; title = db['title']
filename = string.replace(filename,'.txt','-filtered.txt')
print 'Writing',filename
del db['filename']; del db['title']
fn=filepath(filename); data = open(fn,'w'); data.write(title)
for (old,new) in db:
for line in db[(old,new)]: ### Replace the old ID with the new one
if old not in line and '|' in old:
old = old[:-2]
if ('miR-'+new) in line: ### Occurs when the probeset is a number found in the miRNA name
line = string.replace(line,'miR-'+new,'miR-'+old)
line = string.replace(line,old,new); data.write(line)
data.close()
def exportArrayIDEnsemblAssociations(ensembl_associations,species,array_type):
annotation_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'-Ensembl_relationships.txt'
fn=filepath(annotation_db_filename); data = open(fn,'w')
title = ['ArrayGeneID','Ensembl']; title = string.join(title,'\t')+'\n'
data.write(title)
for array_geneid in ensembl_associations:
for ens_geneid in ensembl_associations[array_geneid]:
values = [array_geneid,ens_geneid]; values = string.join(values,'\t')+'\n'; data.write(values)
data.close()
def exportCriticalExonLocations(species,array_type,critical_exon_seq_db):
location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations.txt'
fn=filepath(location_db_filename); data = open(fn,'w')
title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
data.write(title)
for ens_geneid in critical_exon_seq_db:
for cd in critical_exon_seq_db[ens_geneid]:
try:
values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
values = string.join(values,'\t')+'\n'
data.write(values)
except AttributeError:
#print cd.ArrayGeneID(), cd.ExonID()
#print cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()
#sys.exit()
pass
data.close()
class ExonSeqData:
def __init__(self,exon,array_geneid,probeset_id,critical_junctions,critical_exon_seq):
self._exon = exon; self._array_geneid = array_geneid; self._critical_junctions = critical_junctions
self._critical_exon_seq = critical_exon_seq; self._probeset_id = probeset_id
def ProbesetID(self): return self._probeset_id
def ArrayGeneID(self): return self._array_geneid
def ExonID(self): return self._exon
def CriticalJunctions(self): return self._critical_junctions
def ExonSeq(self): return string.upper(self._critical_exon_seq)
def setExonStart(self,exon_start):
try: self._exon_start = self._exon_start ### If it already is set from the input file, keep it
except Exception: self._exon_start = exon_start
def setExonStop(self,exon_stop):
try: self._exon_stop = self._exon_stop ### If it already is set from the input file, keep it
except Exception: self._exon_stop = exon_stop
def setGeneStart(self,gene_start): self._gene_start = gene_start
def setGeneStop(self,gene_stop): self._gene_stop = gene_stop
def ExonStart(self): return str(self._exon_start)
def ExonStop(self): return str(self._exon_stop)
def GeneStart(self): return str(self._gene_start)
def GeneStop(self): return str(self._gene_stop)
def importCriticalExonSeq(filename,array_type,ensembl_associations):
verifyFile(filename,array_type)
fn=filepath(filename); key_db = {}; x = 0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(data,'\t')
if len(critical_exon_seq)>5:
array_geneid, exon = string.split(arraygeneid_exon,':')
if array_geneid in ensembl_associations:
ens_geneids = ensembl_associations[array_geneid]
for ens_geneid in ens_geneids:
seq_data = ExonSeqData(exon,array_geneid,arraygeneid_exon,critical_junctions,critical_exon_seq)
try: key_db[ens_geneid].append(seq_data)
except KeyError: key_db[ens_geneid] = [seq_data]
return key_db
def updateCriticalExonSequences(array_type, filename,ensembl_probeset_db):
exon_seq_db_filename = filename[:-4]+'_updated.txt'
fn=filepath(exon_seq_db_filename); data = open(fn,'w')
critical_exon_seq_db={}
for ens_gene in ensembl_probeset_db:
for probe_data in ensembl_probeset_db[ens_gene]:
exon_id,((probe_start,probe_stop,probeset_id,exon_class,transcript_clust),ed) = probe_data
try: critical_exon_seq_db[probeset_id] = ed.ExonSeq()
except AttributeError: null=[] ### Occurs when no sequence data is associated with exon (probesets without exon associations)
ensembl_probeset_db=[]; key_db = {}; x = 0
if array_type == 'AltMouse':
fn1=filepath(filename)
verifyFile(filename,array_type)
for line in open(fn1,'rU').xreadlines():
line_data = cleanUpLine(line)
if x == 0: x = 1; data.write(line)
else:
arraygeneid_exon,critical_junctions,critical_exon_seq = string.split(line_data,'\t')
if arraygeneid_exon in critical_exon_seq_db:
critical_exon_seq = critical_exon_seq_db[arraygeneid_exon]
values = [arraygeneid_exon,critical_junctions,critical_exon_seq]
values = string.join(values,'\t')+'\n'
data.write(values)
else: data.write(line)
elif array_type == 'junction':
### We don't need any of the additional information used for AltMouse arrays
for probeset in critical_exon_seq_db:
critical_exon_seq = critical_exon_seq_db[probeset]
if ':' in probeset:
probeset = string.split(probeset,':')[1]
values = [probeset,'',critical_exon_seq]
values = string.join(values,'\t')+'\n'
data.write(values)
data.close()
print exon_seq_db_filename, 'exported....'
def inferJunctionComps(species,array_type,searchChr=None):
if len(array_type) == 3:
### This indicates that the ensembl_probeset_db is already included
array_type,ensembl_probeset_db,root_dir = array_type
comps_type = ''
else:
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'junction-regions',{})
comps_type = 'updated'; root_dir = ''
if array_type != 'RNASeq':
print "Import junction probeset region IDs for",species
print "Preparing region IDs for analysis of possible reciprocal junctions"
putative_as_junction_db={}; probeset_juntion_db={}; common_exon_blocks_exon={}; common_exon_blocks_intron={}; count=0
for gene in ensembl_probeset_db:
for (probeset,regionid) in ensembl_probeset_db[gene]:
regionids = string.split(regionid,'|')
for regionid in regionids:
if '-' in regionid:
novel_5p=False; novel_3p=False
if 'I' in regionid: exons_type = 'exon-intron'
else: exons_type = 'exons'
exon_5prime_original, exon_3prime_original = string.split(regionid,'-')
exon_5prime = string.split(exon_5prime_original,'.')
if '_' in exon_5prime[1]:
exon_5prime[1] = float(string.replace(exon_5prime[1],'_','.'))
novel_5p=True
else: exon_5prime[1] = int(exon_5prime[1])
e1a3 = (int(exon_5prime[0][1:]),int(exon_5prime[1])) ### The first is an int for the region - since it hybs early
e1a5 = (int(exon_5prime[0][1:]),exon_5prime[1])
e1 = e1a3, e1a5
exon_3prime = string.split(exon_3prime_original,'.')
if '_' in exon_3prime[1]:
exon_3prime[1] = float(string.replace(exon_3prime[1],'_','.'))
novel_3p=True
else:
try: exon_3prime[1] = int(exon_3prime[1])
except Exception: print exon_3prime;kill
e2a3 = (int(exon_3prime[0][1:]),exon_3prime[1])
e2a5 = (int(exon_3prime[0][1:]),int(exon_3prime[1])) ### The second is an int for the region - since it hybs late
e2 = e2a3, e2a5
if exons_type == 'exons':
if novel_5p and novel_3p:
None ### Ignore junctions where both the 5' and 3' splice sites are novel -> likely false positives
### If you include these with novel junction discovery in TopHat, you can get a huge memory issue in compareJunctions
else:
count+=1
try: putative_as_junction_db[gene].append((e1,e2))
except Exception: putative_as_junction_db[gene] = [(e1,e2)]
### This matches the recorded junction ID from EnsemblImport.compareJunctions()
try: probeset_juntion_db[gene,(e1a5,e2a3)].append(probeset)
except Exception: probeset_juntion_db[gene,(e1a5,e2a3)] = [probeset]
### Defines exon-intron and exon-exon reciprocal junctions based on shared exon blocks
block = e1a3[0]; side = 'left'
try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
block = e2a3[0]; side = 'right'
try: common_exon_blocks_exon[side,gene,block].append([regionid,probeset])
except KeyError: common_exon_blocks_exon[side,gene,block] = [[regionid,probeset]]
else:
### Defines exon-intron and exon-exon reciprocal junctions based on shared exon blocks
### In 2.0.8 we expanded the search criterion here so that each side and exon-block are searched for matching junctions (needed for confirmatory novel exons)
if 'I' in exon_5prime or 'I' in exon_5prime[0]: ### Can be a list with the first object being the exon annotation
block = e2a3[0]; side = 'right'; critical_intron = exon_5prime_original
alt_block = e1a3[0]; alt_side = 'left'
else:
block = e1a3[0]; side = 'left'; critical_intron = exon_3prime_original
alt_block = e2a3[0]; alt_side = 'right'
#if gene == 'ENSG00000112695':
#print critical_intron,regionid,probeset, exon_5prime_original, exon_3prime_original, exon_5prime
try: common_exon_blocks_intron[side,gene,block].append([regionid,probeset,critical_intron])
except KeyError: common_exon_blocks_intron[side,gene,block] = [[regionid,probeset,critical_intron]]
### Below added in 2.0.8 to accommodate a broader comparison of reciprocal splice junctions
try: common_exon_blocks_intron[alt_side,gene,alt_block].append([regionid,probeset,critical_intron])
except KeyError: common_exon_blocks_intron[alt_side,gene,alt_block] = [[regionid,probeset,critical_intron]]
if array_type != 'RNASeq':
print count, 'probed junctions being compared to identify putative reciprocal junction comparisons'
critical_exon_db, critical_gene_junction_db = EnsemblImport.compareJunctions(species,putative_as_junction_db,{},rootdir=root_dir, searchChr=searchChr)
if array_type != 'RNASeq':
print len(critical_exon_db),'genes with alternative reciprocal junctions pairs found'
global junction_inclusion_db; count=0; redundant=0; junction_annotations={}; critical_exon_annotations={}
junction_inclusion_db = JunctionArrayEnsemblRules.reimportJunctionComps(species,array_type,(comps_type,ensembl_probeset_db))
for gene in critical_exon_db:
for sd in critical_exon_db[gene]:
junction_pairs = getJunctionPairs(sd.Junctions())
"""
if len(junction_pairs)>1 and len(sd.CriticalExonRegion())>1:
print
.Junctions()
print sd.CriticalExonRegion();kill"""
for (junction1,junction2) in junction_pairs:
critical_exon = sd.CriticalExonRegion()
excl_junction,incl_junction = determineExclIncl(junction1,junction2,critical_exon)
incl_junction_probeset = probeset_juntion_db[gene,incl_junction][0]
excl_junction_probeset = probeset_juntion_db[gene,excl_junction][0]
source = 'Inferred'
incl_junction=formatJunctions(incl_junction)
excl_junction=formatJunctions(excl_junction)
critical_exon=string.replace(formatJunctions(critical_exon),'-','|'); count+=1
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_exon,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
#if gene == 'ENSG00000112695':# and 'I' in critical_exon:
#print critical_exon,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except KeyError: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
junction_str = string.join([excl_junction,incl_junction],'|')
#splice_event_str = string.join(sd.SpliceType(),'|')
try: junction_annotations[ji.InclusionProbeset()].append((junction_str,sd.SpliceType()))
except KeyError: junction_annotations[ji.InclusionProbeset()] = [(junction_str,sd.SpliceType())]
try: junction_annotations[ji.ExclusionProbeset()].append((junction_str,sd.SpliceType()))
except KeyError: junction_annotations[ji.ExclusionProbeset()] = [(junction_str,sd.SpliceType())]
critical_exons = string.split(critical_exon,'|')
for critical_exon in critical_exons:
try: critical_exon_annotations[gene+':'+critical_exon].append((junction_str,sd.SpliceType()))
except KeyError: critical_exon_annotations[gene+':'+critical_exon] = [(junction_str,sd.SpliceType())]
else: redundant+=1
if array_type != 'RNASeq':
print count, 'Inferred junctions identified with',redundant, 'redundant.'
### Compare exon and intron blocks for intron-aligning junctions
junction_inclusion_db = annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db)
if len(root_dir)>0: exportUpdatedJunctionComps((species,root_dir),array_type,searchChr=searchChr)
else: exportUpdatedJunctionComps(species,array_type)
clearObjectsFromMemory(junction_inclusion_db); junction_inclusion_db=[]
if array_type == 'RNASeq':
### return these annotations for RNASeq analyses
return junction_annotations,critical_exon_annotations
def annotateNovelIntronSplicingEvents(common_exon_blocks_intron,common_exon_blocks_exon,junction_inclusion_db):
### Add exon-intron, exon-exon reciprocal junctions determined based on a common exon block (same side of the junction)
new_intron_events=0
for key in common_exon_blocks_intron:
(side,gene,block) = key; source='Inferred-Intron'
if key in common_exon_blocks_exon:
for (excl_junction,excl_junction_probeset) in common_exon_blocks_exon[key]:
for (incl_junction,incl_junction_probeset,critical_intron) in common_exon_blocks_intron[key]:
#if gene == 'ENSG00000112695':# and 'E2.9-E3.1' in excl_junction_probeset:
#print critical_intron,'\t', incl_junction,'\t',excl_junction_probeset,'\t',incl_junction_probeset,'\t',side,'\t',gene,block
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
new_intron_events+=1
#print new_intron_events, 'novel intron-splicing events added to database'
"""
### While the below code seemed like a good idea, the current state of RNA-seq alignment tools produces a ridiculous number of intron-intron junctions (usually in the same intron)
### Without supporting data (e.g., other junctions bridging these intron junctions to a validated exon), we must assume these junctions are not associated with the aligned gene
new_intron_events=0 ### Compare Intron blocks to each other
for key in common_exon_blocks_intron:
(side,gene,block) = key; source='Inferred-Intron'
for (excl_junction,excl_junction_probeset,critical_intron1) in common_exon_blocks_intron[key]:
for (incl_junction,incl_junction_probeset,critical_intron2) in common_exon_blocks_intron[key]:
if (excl_junction,excl_junction_probeset) != (incl_junction,incl_junction_probeset): ### If comparing entries in the same list, don't compare a single entry to itself
ji=JunctionArrayEnsemblRules.JunctionInformation(gene,critical_intron1+'|'+critical_intron2,excl_junction,incl_junction,excl_junction_probeset,incl_junction_probeset,source)
if (excl_junction_probeset,incl_junction_probeset) not in junction_inclusion_db and (incl_junction_probeset,excl_junction_probeset) not in junction_inclusion_db:
try: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset].append(ji)
except Exception: junction_inclusion_db[excl_junction_probeset,incl_junction_probeset] = [ji]
new_intron_events+=1
"""
#print new_intron_events, 'novel intron-splicing events added to database'
return junction_inclusion_db
def determineExclIncl(junction1,junction2,critical_exons):
#((3, 2), (6, 1))
for critical_exon in critical_exons:
if critical_exon in junction1: incl_junction = junction1; excl_junction = junction2
if critical_exon in junction2: incl_junction = junction2; excl_junction = junction1
try: return excl_junction,incl_junction
except Exception:
print critical_exons
print junction1
print junction2
print 'Warning... Unknown error. Contact AltAnalyze support for assistance.'
sys.exit()
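### Editor's sketch (hypothetical tuples): with junction1=((3,2),(5,1)), junction2=((3,2),(6,1)) and
### critical_exons=[(5,1)], the critical exon lies in junction1, so the function returns
### (junction2, junction1), i.e. (exclusion, inclusion).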
def formatJunctions(junction):
#((3, 2), (6, 1))
exons_to_join=[]
for i in junction:
exons_to_join.append('E'+str(i[0])+'.'+string.replace(str(i[1]),'.','_'))
junction_str = string.join(exons_to_join,'-')
return junction_str
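### Editor's sketch: formatJunctions(((3, 2), (6, 1))) returns 'E3.2-E6.1'; a novel region such as
### ((3, 2), (6, 1.5)) is rendered with an underscore, giving 'E3.2-E6.1_5'.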
def getJunctionPairs(junctions):
### Although the pairs of junctions (exclusion 1st, inclusion 2nd) are given, need to separate out the pairs to report reciprocal junctions
# (((3, 2), (6, 1)), ((4, 2), (6, 1)), ((3, 2), (6, 1)), ((4, 2), (6, 1))))
count = 0; pairs=[]; pairs_db={}
for junction in junctions:
count +=1; pairs.append(junction)
if count==2: pairs_db[tuple(pairs)]=[]; count = 0; pairs=[]
return pairs_db
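### Editor's sketch (hypothetical input): consecutive junctions are paired off, so [j1, j2, j3, j4]
### yields {(j1, j2): [], (j3, j4): []}, with the exclusion junction first and the inclusion second.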
def getJunctionExonLocations(species,array_type,specific_array_type):
global ensembl_associations
ensembl_associations = importJunctionArrayAnnotations(species,array_type,specific_array_type)
extraction_type = 'sequence'
exon_seq_db=importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type)
if 'HTA' in specific_array_type or 'MTA' in specific_array_type:
exportImportedProbesetLocations(species,array_type,exon_seq_db,ensembl_associations)
getLocations(species,array_type,exon_seq_db)
def exportImportedProbesetLocations(species,array_type,critical_exon_seq_db,ensembl_associations):
location_db_filename = 'AltDatabase/'+species+'/'+array_type+'/'+array_type+'_critical_exon_locations-original.txt'
fn=filepath(location_db_filename); data = open(fn,'w')
title = ['Affygene','ExonID','Ensembl','start','stop','gene-start','gene-stop','ExonSeq']; title = string.join(title,'\t')+'\n'
data.write(title)
for ens_geneid in critical_exon_seq_db:
for cd in critical_exon_seq_db[ens_geneid]:
try:
values = [cd.ArrayGeneID(),cd.ExonID(),ens_geneid,cd.ExonStart(),cd.ExonStop(),cd.GeneStart(), cd.GeneStop(), cd.ExonSeq()]
values = string.join(values,'\t')+'\n'
data.write(values)
except AttributeError: null = []
data.close()
def identifyCriticalExonLocations(species,array_type):
critical_exon_seq_db = importAnnotateCriticalExonSequences(species,array_type)
getLocations(species,array_type,critical_exon_seq_db)
def getLocations(species,array_type,critical_exon_seq_db):
analysis_type = 'get_locations'
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
critical_exon_seq_db = EnsemblImport.import_sequence_data(gene_seq_filename,critical_exon_seq_db,species,analysis_type)
exportCriticalExonLocations(species,array_type,critical_exon_seq_db)
def reAnnotateCriticalExonSequences(species,array_type):
export_exon_filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_Ensembl_'+array_type+'_probesets.txt'
ensembl_probeset_db = ExonArrayEnsemblRules.reimportEnsemblProbesetsForSeqExtraction(export_exon_filename,'null',{})
#analysis_type = 'get_sequence'
analysis_type = ('region_only','get_sequence') ### Added after EnsMart65
dir = 'AltDatabase/'+species+'/SequenceData/chr/'+species; gene_seq_filename = dir+'_gene-seq-2000_flank.fa'
ensembl_probeset_db = EnsemblImport.import_sequence_data(gene_seq_filename,ensembl_probeset_db,species,analysis_type)
critical_exon_file = 'AltDatabase/'+species+'/'+ array_type + '/' + array_type+'_critical-exon-seq.txt'
if array_type == 'AltMouse': verifyFile(critical_exon_file,array_type)
updateCriticalExonSequences(array_type, critical_exon_file, ensembl_probeset_db)
if __name__ == '__main__':
"""Module has methods for annotating Junction associated critical exon sequences with up-to-date genome coordinates and analysis options for
junciton arrays from AnalyzeExpressionDatasets"""
m = 'Mm'; h = 'Hs'; species = h; array_type = 'junction' ###In theory, could be another type of junction or combination array
specific_array_type = 'hGlue'
extraction_type = 'comparisons'
filename = 'AltDatabase/'+species+'/'+array_type+'/'+species+'_LiftOverEnsembl.txt'
verifyFile(filename,array_type+'/'+specific_array_type);sys.exit()
tc_ensembl_annotations = importJunctionArrayAnnotationMappings(array_type,specific_array_type,species,extraction_type); sys.exit()
combineExonJunctionAnnotations(species,array_type);sys.exit()
filterForCriticalExons(species,array_type)
overRideExonEntriesWithJunctions(species,array_type);sys.exit()
#inferJunctionComps(species,array_type); sys.exit()
identifyJunctionComps(species,array_type,specific_array_type);sys.exit()
filterForCriticalExons(species,array_type);sys.exit()
reAnnotateCriticalExonSequences(species,array_type)
#getJunctionExonLocations(species,array_type,specific_array_type)
sys.exit()
import_dir = '/AltDatabase/exon/'+species; expr_file_dir = 'R_expression_raw_data\exp.altmouse_es-eb.dabg.rma.txt'
dagb_p = 1; Analysis_Method = 'rma'
#identifyCriticalExonLocations(species,array_type)
#JunctionArrayEnsemblRules.getAnnotations(species,array_type)
### Only needs to be run once, to update the original
#reAnnotateCriticalExonSequences(species,array_type); sys.exit()
#getAnnotations(expr_file_dir,dagb_p,Species,Analysis_Method)
| apache-2.0 |
Fireblend/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 373 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
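# Editorial note: each 8x8 digit image contributes 64 pixel features; FeatureAgglomeration merges
# them into 32 connectivity-constrained clusters, transform() pools each cluster (mean by default)
# and inverse_transform() assigns the pooled value back to every pixel of the cluster, which is
# what the 'Agglomerated data' panels below visualize.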
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
mgraffg/simplegp | SimpleGP/bayes.py | 1 | 16202 | # Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from SimpleGP.forest import SubTreeXO
from SimpleGP.simplegp import GPS
from SimpleGP.sparse_array import SparseArray
import numpy as np
import array
import math
class Bayes(GPS, SubTreeXO):
def __init__(self, ntrees=5, nrandom=0, max_length=1024, ncl=None,
class_freq=None, use_st=0,
seed=0, **kwargs):
super(Bayes, self).__init__(ntrees=ntrees, nrandom=nrandom,
max_length=max_length, seed=seed,
**kwargs)
if use_st == 1:
raise NotImplementedError('Cache is not implemented')
self._elm_constants = None
self._save_ind = []
self._ncl = ncl
self._class_freq = class_freq
if self._class_freq is not None:
assert len(self._class_freq) == self._ncl
self._class_freq_test = None
def fitness_validation(self, k):
"""
Fitness function used on the validation set.
In this case it is the same fitness used during the evolution.
"""
if self._class_freq_test is None:
self._class_freq_test = self._test_set_y.class_freq(self._ncl)
cnt = self._test_set_y.size()
y = self._test_set_y
return - y.BER(self._pr_test_set[:cnt], self._class_freq_test)
def save_ind(self, k):
self._save_ind = [self.population[k],
self._p_constants[k],
self._elm_constants[k],
self._fitness[k],
self._class_freq]
def restore_ind(self, k):
ind = self._save_ind
self.population[k] = ind[0]
self._p_constants[k] = ind[1]
self._elm_constants[k] = ind[2]
self._fitness[k] = ind[3]
self._class_freq = ind[4]
def train(self, *args, **kwargs):
super(Bayes, self).train(*args, **kwargs)
self._nop[self._output_pos] = self._ntrees
if self._ncl is None:
self._ncl = np.unique(self._f.tonparray()).shape[0]
if self._class_freq is None:
self._class_freq = self._f.class_freq(self._ncl)
tot = sum(self._class_freq)
self._log_class_prior = array.array('d', map(lambda x:
math.log(x / tot),
self._class_freq))
self._class_prior = array.array('d', map(lambda x:
x / tot,
self._class_freq))
return self
def create_population(self):
if self._elm_constants is None or\
self._elm_constants.shape[0] != self._popsize:
self._elm_constants = np.empty(self._popsize,
dtype=np.object)
return super(Bayes, self).create_population()
def early_stopping_save(self, k, fit_k=None):
"""
Storing the best so far on the validation set.
        This function is called from early_stopping
"""
assert fit_k is not None
self._early_stopping = [fit_k,
self.population[k].copy(),
self._p_constants[k].copy(),
self._elm_constants[k],
self._class_freq]
def set_early_stopping_ind(self, ind, k=0):
if self._best == k:
raise "Losing the best so far"
self.population[k] = ind[1]
self._p_constants[k] = ind[2]
self._elm_constants[k] = ind[3]
self._class_freq = ind[4]
tot = sum(self._class_freq)
self._log_class_prior = array.array('d', map(lambda x:
math.log(x / tot),
self._class_freq))
self._class_prior = array.array('d', map(lambda x:
x / tot,
self._class_freq))
def predict_log_proba(self, k, X=None):
from sklearn.utils.extmath import logsumexp
jll = self.joint_log_likelihood(k, X=X)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
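    # (added note) Subtracting logsumexp(jll, axis=1) turns the joint
    # log-likelihoods into log-posteriors,
    #   log P(c | x) = jll[:, c] - log(sum_k exp(jll[:, k])),
    # so each row of np.exp(predict_log_proba(...)) sums to one.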
def predict(self, X, ind=None):
if ind is not None:
fit_k = self._fitness[ind]
self._fitness[ind] = 1
pr = super(Bayes, self).predict(X, ind=ind)
if ind is not None:
self._fitness[ind] = fit_k
return pr
def predict_proba(self, X=None, ind=None):
if ind is None:
ind = self.best
fit_k = self._fitness[ind]
self._fitness[ind] = 1
pr = np.exp(self.predict_log_proba(ind, X=X))
self._fitness[ind] = fit_k
return pr
def joint_log_likelihood(self, k, X=None):
self._computing_fitness = k
if X is not None:
self._eval.X(X)
super(Bayes, self).eval_ind(self.population[k], pos=0,
constants=self._p_constants[k])
if X is not None:
self._eval.X(self._x)
Xs = self._eval.get_output()
y = self._f
if self._fitness[k] > -np.inf:
[mu, var, index] = self._elm_constants[k]
if len(index) == 0:
return None
elif len(index) < len(Xs):
Xs = map(lambda x: Xs[x], index)
else:
index = filter(lambda x: Xs[x].isfinite(), range(len(Xs)))
if len(index) == 0:
return None
elif len(index) < len(Xs):
Xs = map(lambda x: Xs[x], index)
mu = y.mean_per_cl(Xs, self._class_freq)
var = y.var_per_cl(Xs, mu, self._class_freq)
self._elm_constants[k] = [mu, var, index]
llh = y.joint_log_likelihood(Xs, mu, var, self._log_class_prior)
return llh
def eval_ind(self, ind, **kwargs):
if self._computing_fitness is None:
cdn = "Use eval with the number of individual, instead"
NotImplementedError(cdn)
k = self._computing_fitness
llh = self.joint_log_likelihood(k)
if llh is not None and np.all(np.isfinite(llh)):
return SparseArray.fromlist(llh.argmax(axis=1))
return SparseArray.fromlist(map(lambda x: np.inf,
range(self._eval.get_X()[0].size())))
def distance(self, y, yh):
return y.BER(yh, self._class_freq)
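# Expository sketch (added; not used by the classes in this module): the score
# returned by Bayes.joint_log_likelihood is a Gaussian naive-Bayes style joint
# log-likelihood over the GP-generated features.  A dense-numpy version of the
# same quantity is sketched below; `Xs`, `mu`, `var` and `log_prior` are
# illustrative names for arrays of shape (n_samples, n_features),
# (n_classes, n_features), (n_classes, n_features) and (n_classes,).
def _gaussian_joint_log_likelihood_sketch(Xs, mu, var, log_prior):
    jll = np.empty((Xs.shape[0], mu.shape[0]))
    for c in range(mu.shape[0]):
        # sum over features of log N(x | mu_c, var_c), plus the log class prior
        log_pdf = -0.5 * (np.log(2 * np.pi * var[c]) +
                          (Xs - mu[c]) ** 2 / var[c]).sum(axis=1)
        jll[:, c] = log_prior[c] + log_pdf
    return jll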
class AdaBayes(Bayes):
def __init__(self, ntimes=2, frac_ts=1.0, **kwargs):
super(AdaBayes, self).__init__(**kwargs)
self._inds = []
self._ntimes = ntimes
self._prob = None
self._X_all = None
self._y_all = None
self._beta_constants = None
self._frac_ts = frac_ts
def create_population(self):
if self._beta_constants is None or\
self._beta_constants.shape[0] != self._popsize:
self._beta_constants = np.empty(self._popsize,
dtype=np.object)
return super(AdaBayes, self).create_population()
def compute_beta(self, ind):
y = self._y_all.tonparray()
yh = super(AdaBayes, self).predict(self._X_all, ind=ind).tonparray()
self._beta_update = y == yh
et = (~ self._beta_update).mean()
self._et = et
if et > 0.95:
return False
self._beta_constants[ind] = et / (1 - et)
return True
def save_ind(self, k):
super(AdaBayes, self).save_ind(k)
self._save_ind.append(self._beta_constants[k])
def restore_ind(self, k):
super(AdaBayes, self).restore_ind(k)
self._beta_constants[k] = self._save_ind[5]
def set_early_stopping_ind(self, ind, k=0):
super(AdaBayes, self).set_early_stopping_ind(ind, k=k)
self._beta_constants[k] = ind[5]
def early_stopping_save(self, k, fit_k=None):
super(AdaBayes, self).early_stopping_save(k, fit_k=fit_k)
self._early_stopping.append(self._beta_constants[k])
if self._et > 0.5:
return
if self._verbose:
print "Best so far", self.gens_ind,\
"%0.4f" % self.early_stopping[0]
self._inds.append(self.early_stopping)
# updating the distribution
beta = self._beta_constants[k]
mask = self._beta_update
self._prob[mask] *= beta
mu = self._prob.sum()
self._prob = self._prob / mu
self._run = False
# updating the training size
# self.train(self._X_all, self._y_all, prob=self._prob)
# self._fitness.fill(-np.inf)
# self._best = None
# self._best_fit = None
def predict_test_set(self, ind):
"""Predicting the test set"""
if not self.compute_beta(ind):
return self._test_set[0].constant(np.inf,
size=self._test_set[0].size())
return self.predict(self._test_set, ind)
def predict(self, X, ind=None):
assert ind is not None
assert self._beta_constants[ind] is not None
score = np.zeros((X[0].size(), self._ncl))
index = np.arange(X[0].size())
k = 0
if self._best is not None and self._best == k:
k += 1
hist = None
for inds in self._inds:
self.save_ind(k)
self.set_early_stopping_ind(inds, k=k)
pr = super(AdaBayes, self).predict(X,
ind=k).tonparray()
beta = math.log(1 / self._beta_constants[k])
score[index, pr.astype(np.int)] += beta
self.restore_ind(k)
hist = [inds[1], inds[5]]
if hist is None or (np.any(hist[0] != self.population[ind]) or
hist[1] != self._beta_constants[ind]):
pr = super(AdaBayes, self).predict(X, ind=ind).tonparray()
if not np.all(np.isfinite(pr)):
return X[0].constant(np.inf,
size=X[0].size())
beta = math.log(1 / self._beta_constants[ind])
score[index, pr.astype(np.int)] += beta
self._score_yh = score
return SparseArray.fromlist(score.argmax(axis=1))
def select_ts(self):
index = None
cnt = int(self._frac_ts * self._X_all[0].size())
while index is None or index.shape[0] < cnt:
a = np.random.uniform(size=self._prob.shape[0])
a = np.where(a < self._prob)[0]
np.random.shuffle(a)
if index is None:
index = a[:cnt]
else:
index = np.concatenate((index, a))
index = index[:cnt]
index.sort()
return index
def train(self, X, y, prob=None, **kwargs):
self._X_all = X
self._y_all = y
if prob is None:
prob = np.empty(y.size())
prob.fill(1. / y.size())
self._prob = prob
else:
self._prob = prob
index = self.select_ts()
return super(AdaBayes, self).train(map(lambda x: x[index], X),
y[index], **kwargs)
def fit(self, X, y, test=None, callback=None, callback_args=None,
test_y=None, **kwargs):
if test is not None:
self.set_test(test, y=test_y)
ntimes = self._ntimes
fit = -np.inf
prob = None
for i in range(ntimes):
self._ntimes = i
self.train(X, y, prob=prob)
self.create_population()
self.init()
self.run()
if self.early_stopping[0] <= fit:
self._p = None
continue
prob = self._prob
fit = self.early_stopping[0]
self._p = None
if callback:
if callback_args is None:
callback(self)
else:
callback(self, *callback_args)
self._ntimes = ntimes
return self
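# Expository sketch (added; illustrative only): the distribution update done in
# AdaBayes.early_stopping_save broadly follows the AdaBoost.M1 recipe -- samples
# the current model classifies correctly get their weight multiplied by
# beta = et / (1 - et), and the weights are renormalised.
def _adaboost_weight_update_sketch(prob, correct, et):
    # prob: current sample weights, correct: boolean mask of correct
    # predictions, et: training error of the current model
    beta = et / (1.0 - et)
    prob = prob.copy()
    prob[correct] *= beta
    return prob / prob.sum()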
class IBayes(Bayes):
def __init__(self, ntimes=2, **kwargs):
super(IBayes, self).__init__(**kwargs)
self._inds = []
self._prev_f = None
self._prev_index = None
self._ntimes = ntimes
def prev_llh(self, llh):
self._prev_index = llh.argmax(axis=1)
self._prev_f = llh.max(axis=1)
def predict_llh(self, X=None, ind=None):
k = ind
res = [self.joint_log_likelihood(k, X=X)]
k = 0
if self._best is not None and self._best == k:
k += 1
for ind in self._inds:
self.save_ind(k)
self.set_early_stopping_ind(ind, k=k)
res.append(self.joint_log_likelihood(k, X=X))
self.restore_ind(k)
return np.concatenate(res, axis=1)
def predict_proba(self, X=None, ind=None):
res = [super(IBayes, self).predict_proba(X=X, ind=ind)]
k = 0
if self._best is not None and self._best == k:
k += 1
for ind in self._inds:
self.save_ind(k)
self.set_early_stopping_ind(ind, k=k)
res.append(super(IBayes, self).predict_proba(X=X, ind=k))
self.restore_ind(k)
return np.concatenate(res, axis=1)
def predict(self, X=None, ind=None):
a = self.predict_proba(X=X, ind=ind)
return SparseArray.fromlist(a.argmax(axis=1) % self._ncl)
def eval_ind(self, ind, **kwargs):
if self._computing_fitness is None:
cdn = "Use eval with the number of individual, instead"
NotImplementedError(cdn)
k = self._computing_fitness
llh = self.joint_log_likelihood(k)
if self._prev_f is None:
if llh is not None and np.all(np.isfinite(llh)):
return SparseArray.fromlist(llh.argmax(axis=1))
else:
return SparseArray.fromlist(map(lambda x: np.inf,
range(self._x[0].size())))
if llh is None or not np.all(np.isfinite(llh)):
yh = self._prev_index
return SparseArray.fromlist(yh)
a = np.vstack((self._prev_index, llh.argmax(axis=1)))
f = np.vstack((self._prev_f, llh[np.arange(llh.shape[0]), a[1]]))
yh = a[f.argmax(axis=0), np.arange(f.shape[1])]
return SparseArray.fromlist(yh)
def fit(self, X, y, test=None, callback=None, callback_args=None,
test_y=None, **kwargs):
if test is not None:
self.set_test(test, y=test_y)
ntimes = self._ntimes
fit = -np.inf
for i in range(ntimes):
self._ntimes = i
self.train(X, y)
self.create_population()
self.init()
self.run()
if self.early_stopping[0] <= fit:
break
fit = self.early_stopping[0]
self.population[self.best] = self.early_stopping[1]
self._p_constants[self.best] = self.early_stopping[2]
self._elm_constants[self.best] = self.early_stopping[3]
llh = self.predict_llh(self._x, ind=self.best)
self.prev_llh(llh)
self._inds.append(map(lambda x: x, self.early_stopping))
if callback:
if callback_args is None:
callback(self)
else:
callback(self, *callback_args)
self._ntimes = ntimes
return self
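# (added note) IBayes.predict_proba concatenates the per-model class
# probabilities column-wise; predict therefore takes the overall argmax and maps
# it back to a class label with `% self._ncl`, i.e. column j of the concatenated
# matrix corresponds to class j % ncl of model j // ncl.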
| apache-2.0 |
thientu/scikit-learn | sklearn/utils/estimator_checks.py | 6 | 52087 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils import ConvergenceWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
    name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
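# Usage sketch (added for illustration; not part of the original module): in
# this version check_estimator is called with the estimator *class*, e.g.
#
#     from sklearn.utils.estimator_checks import check_estimator
#     from sklearn.linear_model import LogisticRegression
#     check_estimator(LogisticRegression)   # raises on the first failing check
#
# A custom estimator only needs to subclass BaseEstimator (plus the relevant
# mixin) for the corresponding groups of checks above to be selected.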
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
    except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features
    # changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too small for
# convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
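# Reference sketch (added for illustration): the 'balanced' heuristic reproduced
# by hand above weights each class as w_c = n_samples / (n_classes * n_c).  A
# dense-numpy helper computing the same dictionary (names are illustrative):
def _balanced_class_weights_sketch(y):
    classes, counts = np.unique(y, return_counts=True)
    weights = len(y) / (len(classes) * counts.astype(np.float64))
    return dict(zip(classes, weights))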
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
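# (added note) get_params(deep=True) additionally exposes nested parameters via
# the "component__param" naming convention (e.g. 'clf__...' for the Pipeline
# built above), so every shallow item must also appear in the deep dictionary --
# which is exactly what the assertion above verifies.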
| bsd-3-clause |
ales-erjavec/orange | setup.py | 6 | 31938 | #!/usr/bin/env python2
try:
import distribute_setup
    # require distribute >= 0.6.26 or setuptools >= 0.7
distribute_setup.use_setuptools(version='0.6.26')
except ImportError:
# For documentation we load setup.py to get version
# so it does not matter if importing fails
pass
import glob, os, sys, types
from distutils import log
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.install_lib import install_lib
from distutils.dep_util import newer_group
from distutils.errors import DistutilsSetupError
from distutils.file_util import copy_file
from distutils.msvccompiler import MSVCCompiler
from distutils.unixccompiler import UnixCCompiler
from distutils.util import convert_path
from distutils.sysconfig import get_python_inc, get_config_var
import subprocess
from subprocess import check_call
from collections import namedtuple
from ConfigParser import SafeConfigParser
from setuptools import setup, find_packages
from setuptools.command.install import install
# Has to be last import as it seems something is changing it somewhere
from distutils.extension import Extension
NAME = 'Orange'
VERSION = '2.7.8'
ISRELEASED = False
DESCRIPTION = 'Orange, a component-based data mining framework.'
LONG_DESCRIPTION = open(os.path.join(os.path.dirname(__file__), 'README.txt')).read()
AUTHOR = 'Bioinformatics Laboratory, FRI UL'
AUTHOR_EMAIL = '[email protected]'
URL = 'http://orange.biolab.si/'
DOWNLOAD_URL = 'https://bitbucket.org/biolab/orange/downloads'
LICENSE = 'GPLv3'
KEYWORDS = (
'data mining',
'machine learning',
'artificial intelligence',
)
CLASSIFIERS = (
'Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Environment :: Console',
'Environment :: Plugins',
'Programming Language :: Python',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
)
try:
import numpy
numpy_include_dir = numpy.get_include()
except ImportError:
# When setup.py is first run to install orange, numpy can still be missing
pass
numpy_include_dir = None
python_include_dir = get_python_inc(plat_specific=1)
include_dirs = [python_include_dir, numpy_include_dir, 'source/include']
if sys.platform == 'darwin':
extra_compile_args = '-fPIC -fno-common -w -DDARWIN'.split()
extra_link_args = '-headerpad_max_install_names -undefined dynamic_lookup'.split()
elif sys.platform == 'win32':
extra_compile_args = ['-EHsc']
extra_link_args = []
elif sys.platform.startswith('linux'):
extra_compile_args = '-fPIC -w -DLINUX'.split()
extra_link_args = ['-Wl,-R$ORIGIN']
else:
extra_compile_args = []
extra_link_args = []
lib_cfg = namedtuple(
"lib_cfg", ["libraries", "library_dirs", "include_dirs"])
site_cfg = namedtuple(
"site_cfg", ["libsvm", "liblinear", "blas", "qhull"])
def libs_parse(text):
return [lib.strip() for lib in text.strip().split()]
def dirs_parse(text):
return text.strip().split(os.path.pathsep)
def parse_lib_opt(parser, section):
libs, library_dirs, include_dirs = [], [], []
if parser.has_option(section, "libraries"):
libs = libs_parse(parser.get(section, "libraries"))
elif parser.has_option(section, "library"):
libs = libs_parse(parser.get(section, "library"))
if parser.has_option(section, "library_dirs"):
library_dirs = \
dirs_parse(parser.get(section, "library_dirs"))
if parser.has_option(section, "include_dirs"):
include_dirs = dirs_parse(parser.get(section, "include_dirs"))
if libs or library_dirs or include_dirs:
return lib_cfg(libs, library_dirs, include_dirs)
else:
return None
def site_config():
"""Return the parsed site configuration.
"""
parser = SafeConfigParser()
parser.read(["setup-site.cfg",
os.path.expanduser("~/.orange-site.cfg")])
libsvm = parse_lib_opt(parser, "libsvm")
liblinear = parse_lib_opt(parser, "liblinear")
blas = parse_lib_opt(parser, "blas")
qhull = parse_lib_opt(parser, "qhull")
return site_cfg(libsvm, liblinear, blas, qhull)
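# Example site configuration (added for illustration; the values below are
# hypothetical).  site_config() merges "setup-site.cfg" and
# "~/.orange-site.cfg", each of which may provide [libsvm], [liblinear], [blas]
# and [qhull] sections understood by parse_lib_opt, e.g.:
#
#   [libsvm]
#   libraries = svm
#   library_dirs = /usr/local/lib
#   include_dirs = /usr/local/include/libsvm
#
#   [blas]
#   library = blas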
# Get the command for building orangeqt extension from
# source/orangeqt/setup.py file.
# Fails without PyQt4.
import imp
try:
orangeqt_setup = imp.load_source('orangeqt_setup', os.path.join(os.path.dirname(__file__), 'source/orangeqt/setup.py'))
build_pyqt_ext = orangeqt_setup.build_pyqt_ext
except ImportError:
orangeqt_setup = None
build_pyqt_ext = None
except Exception:
# should display a warning about this (configuration error ...)
orangeqt_setup = None
build_pyqt_ext = None
class LibStatic(Extension):
pass
class PyXtractExtension(Extension):
def __init__(self, *args, **kwargs):
for name, default in [("extra_pyxtract_cmds", []), ("lib_type", "dynamic")]:
setattr(self, name, kwargs.get(name, default))
if name in kwargs:
del kwargs[name]
Extension.__init__(self, *args, **kwargs)
class PyXtractSharedExtension(PyXtractExtension):
pass
class pyxtract_build_ext(build_ext):
def run_pyxtract(self, ext, dir):
original_dir = os.path.realpath(os.path.curdir)
log.info("running pyxtract for %s" % ext.name)
try:
os.chdir(dir)
## we use the commands which are used for building under windows
pyxtract_cmds = [cmd.split() for cmd in getattr(ext, "extra_pyxtract_cmds", [])]
if os.path.exists("_pyxtract.bat"):
pyxtract_cmds.extend([cmd.split()[1:] for cmd in open("_pyxtract.bat").read().strip().splitlines()])
for cmd in pyxtract_cmds:
log.info(" ".join([sys.executable] + cmd))
check_call([sys.executable] + cmd)
if pyxtract_cmds:
ext.include_dirs.append(os.path.join(dir, "ppp"))
ext.include_dirs.append(os.path.join(dir, "px"))
finally:
os.chdir(original_dir)
def finalize_options(self):
build_ext.finalize_options(self)
# add the build_lib dir and build_temp (for
# liborange_include and liborange linking)
if not self.inplace:
# for linking with liborange.so (it is in Orange package)
self.library_dirs.append(os.path.join(self.build_lib, "Orange"))
# for linking with liborange_include.a
self.library_dirs.append(self.build_temp)
else:
# for linking with liborange.so
self.library_dirs.append("./Orange")
# for linking with liborange_include.a
self.library_dirs.append(self.build_temp)
def build_extension(self, ext):
if isinstance(ext, LibStatic):
# Build static library
self.build_static(ext)
elif isinstance(ext, PyXtractExtension):
# Build pyextract extension
self.build_pyxtract(ext)
elif orangeqt_setup and isinstance(ext, orangeqt_setup.PyQt4Extension):
# Skip the build (will be handled by build_pyqt_ext command)
return
else:
build_ext.build_extension(self, ext)
if isinstance(ext, PyXtractSharedExtension):
# Fix extension modules so they can be linked
# by other modules
if self.dry_run:
# No need to do anything here.
return
if isinstance(self.compiler, MSVCCompiler):
# Copy ${TEMP}/orange/orange.lib to ${BUILD}/orange.lib
ext_fullpath = self.get_ext_fullpath(ext.name)
# Get the last component of the name
ext_name = ext.name.rsplit(".", 1)[-1]
libs = glob.glob(os.path.join(self.build_temp,
"*", "*", ext_name + ".lib"))
if not libs:
log.info("Could not locate library %r in directory %r" \
% (ext_name, self.build_temp))
else:
lib = libs[0]
lib_path = os.path.splitext(ext_fullpath)[0] + ".lib"
copy_file(lib, lib_path, dry_run=self.dry_run)
else:
# Make lib{name}.so link to {name}.so
ext_path = self.get_ext_fullpath(ext.name)
ext_path, ext_filename = os.path.split(ext_path)
realpath = os.path.realpath(os.curdir)
try:
os.chdir(ext_path)
# Get the shared library name
_, name = ext.name.rsplit(".", 1)
lib_filename = self.compiler.library_filename(name, lib_type="shared")
# Create the link
copy_file(ext_filename, lib_filename, link="sym",
dry_run=self.dry_run)
except OSError, ex:
log.info("failed to create shared library for %s: %s" % (ext.name, str(ex)))
finally:
os.chdir(realpath)
def build_pyxtract(self, ext):
## mostly copied from build_extension
sources = ext.sources
if sources is None or type(sources) not in (types.ListType, types.TupleType):
raise DistutilsSetupError, \
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Run pyxtract in dir; this adds the ppp and px dirs to include_dirs
dir = os.path.commonprefix([os.path.split(s)[0] for s in ext.sources])
self.run_pyxtract(ext, dir)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def build_static(self, ext):
## mostly copied from build_extension, changed
sources = ext.sources
if sources is None or type(sources) not in (types.ListType, types.TupleType):
raise DistutilsSetupError, \
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name
sources = list(sources)
# Static libs get built in the build_temp directory
output_dir = self.build_temp
if not os.path.exists(output_dir): #VSC fails if the dir does not exist
os.makedirs(output_dir)
lib_filename = self.compiler.library_filename(ext.name, lib_type='static', output_dir=output_dir)
depends = sources + ext.depends
if not (self.force or newer_group(depends, lib_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX -- this is a Vile HACK!
#
# The setup.py script for Python on Unix needs to be able to
# get this list so it can perform all the clean up needed to
# avoid keeping object files around when cleaning out a failed
# build of an extension module. Since Distutils does not
# track dependencies, we have to get rid of intermediates to
# ensure all the intermediates will be properly re-built.
#
self._built_objects = objects[:]
# Now archive the object files together into a static library --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
#first remove old library (ar only appends the contents if archive already exists)
try:
os.remove(lib_filename)
except OSError, ex:
log.debug("failed to remove obsolete static library %s: %s" %(ext.name, str(ex)))
# The static library is created in the temp dir; it is used during the
# compile step only and should not be included in the final install
self.compiler.create_static_lib(
objects, ext.name, output_dir,
debug=self.debug,
target_lang=language)
def get_libraries(self, ext):
""" Change the 'orange' library name to 'orange_d' if
building in debug mode. Using ``get_ext_filename`` to discover if
_d postfix is required.
"""
libraries = build_ext.get_libraries(self, ext)
if "orange" in libraries and self.debug:
filename = self.get_ext_filename("orange")
basename = os.path.basename(filename)
name, ext = os.path.splitext(basename)
if name.endswith("_d"):
index = libraries.index("orange")
libraries[index] = "orange_d"
return libraries
if not hasattr(build_ext, "get_ext_fullpath"):
#On mac OS X python 2.6.1 distutils does not have this method
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
import string
# makes sure the extension name is only using dots
all_dots = string.maketrans('/' + os.sep, '..')
ext_name = ext_name.translate(all_dots)
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(ext_name)
filename = os.path.split(filename)[-1]
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1] + [filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
# Add build_pyqt_ext to build subcommands
class orange_build(build):
def has_pyqt_extensions(self):
# For now this is disabled unless specifically requested
# using build_pyqt_ext command
return False
# return any([isinstance(ext, orangeqt_setup.PyQt4Extension) \
# for ext in self.distribution.ext_modules]
# )
sub_commands = build.sub_commands
if orangeqt_setup:
sub_commands += [("build_pyqt_ext", has_pyqt_extensions)]
class orange_install_lib(install_lib):
""" An command to install orange (preserves liborange.so -> orange.so symlink)
"""
def run(self):
install_lib.run(self)
def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
preserve_symlinks=1, level=1):
""" Run copy_tree with preserve_symlinks=1 as default
"""
install_lib.copy_tree(self, infile, outfile, preserve_mode,
preserve_times, preserve_symlinks, level)
def install(self):
""" Copy build_dir to install_dir
"""
# Unlink liborange.so -> orange.so if it already exists,
# because copy_tree fails to overwrite it
liborange = os.path.join(self.install_dir, "Orange", "liborange.so")
if os.path.islink(liborange):
log.info("unlinking %s -> %s", liborange,
os.path.join(self.install_dir, "orange.so"))
if not self.dry_run:
os.unlink(liborange)
return install_lib.install(self)
class orange_install(install):
""" A command to install orange while also creating
a .pth path to access the old orng* modules and orange,
orangeom etc.
"""
def run(self):
install.run(self)
# Create a .pth file with a path inside the Orange/orng directory
# so the old modules are importable
self.path_file, self.extra_dirs = ("Orange-orng-modules", "Orange/orng")
self.extra_dirs = convert_path(self.extra_dirs)
log.info("creating portal path for orange compatibility.")
self.create_path_file()
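# create_path_file() writes Orange-orng-modules.pth next to the installed
# packages; its single "Orange/orng" entry puts the legacy orng* modules
# on sys.path at interpreter start-up.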
self.path_file, self.extra_dirs = None, None
def get_source_files(path, ext="cpp", exclude=[]):
files = glob.glob(os.path.join(path, "*." + ext))
files = [file for file in files if os.path.basename(file) not in exclude]
return files
# common library statically linked into orange, orangeom, ...
include_ext = LibStatic(
"orange_include",
get_source_files("source/include/"),
include_dirs=include_dirs,
extra_compile_args=extra_compile_args
)
if sys.platform == "win32": # ?? mingw/cygwin
libraries = ["orange_include"]
else:
libraries = ["stdc++", "orange_include"]
# Custom site configuration
site = site_config()
orange_sources = get_source_files("source/orange/")
orange_include_dirs = list(include_dirs)
orange_library_dirs = []
orange_libraries = list(libraries)
if site.blas:
# Link external blas library
orange_libraries += site.blas.libraries
orange_library_dirs += site.blas.library_dirs
else:
orange_sources += get_source_files("source/orange/blas/", "c")
if site.liblinear:
# Link external LIBLINEAR library
orange_libraries += site.liblinear.libraries
orange_include_dirs += site.liblinear.include_dirs
orange_library_dirs += site.liblinear.library_dirs
else:
orange_sources += get_source_files("source/orange/liblinear/", "cpp")
orange_include_dirs += ["source/orange/liblinear"]
if site.libsvm:
# Link external LibSVM library
orange_libraries += site.libsvm.libraries
orange_include_dirs += site.libsvm.include_dirs
orange_library_dirs += site.libsvm.library_dirs
else:
orange_sources += get_source_files("source/orange/libsvm/", "cpp")
orange_ext = PyXtractSharedExtension(
"Orange.orange",
orange_sources,
include_dirs=orange_include_dirs,
extra_compile_args=extra_compile_args + ["-DORANGE_EXPORTS"],
extra_link_args=extra_link_args,
libraries=orange_libraries,
library_dirs=orange_library_dirs,
extra_pyxtract_cmds=["../pyxtract/defvectors.py"],
)
if sys.platform == "darwin":
build_shared_cmd = get_config_var("BLDSHARED")
# Don't link liborange.so with orangeom and orangene - Mac OS X treats
# loadable modules and shared libraries differently
if "-bundle" in build_shared_cmd.split():
shared_libs = libraries
else:
shared_libs = libraries + ["orange"]
else:
shared_libs = libraries + ["orange"]
orangeom_sources = get_source_files(
"source/orangeom/", exclude=["lib_vectors.cpp"])
orangeom_libraries = list(shared_libs)
orangeom_include_dirs = list(include_dirs)
orangeom_library_dirs = []
if site.qhull:
# Link external qhull library
orangeom_libraries += site.qhull.libraries
orangeom_include_dirs += site.qhull.include_dirs
orangeom_library_dirs += site.qhull.library_dirs
else:
orangeom_sources += get_source_files("source/orangeom/qhull/", "c")
orangeom_include_dirs += ["source/orangeom"]
orangeom_ext = PyXtractExtension(
"Orange.orangeom",
orangeom_sources,
include_dirs=orangeom_include_dirs + ["source/orange/"],
extra_compile_args=extra_compile_args + ["-DORANGEOM_EXPORTS"],
extra_link_args=extra_link_args,
libraries=orangeom_libraries,
library_dirs=orangeom_library_dirs
)
orangene_ext = PyXtractExtension(
"Orange.orangene",
get_source_files("source/orangene/", exclude=["lib_vectors.cpp"]),
include_dirs=include_dirs + ["source/orange/"],
extra_compile_args=extra_compile_args + ["-DORANGENE_EXPORTS"],
extra_link_args=extra_link_args,
libraries=shared_libs,
)
corn_ext = Extension(
"Orange.corn", get_source_files("source/corn/"),
include_dirs=include_dirs + ["source/orange/"],
extra_compile_args=extra_compile_args + ["-DCORN_EXPORTS"],
extra_link_args=extra_link_args,
libraries=libraries
)
statc_ext = Extension(
"Orange.statc", get_source_files("source/statc/"),
include_dirs=include_dirs + ["source/orange/"],
extra_compile_args=extra_compile_args + ["-DSTATC_EXPORTS"],
extra_link_args=extra_link_args,
libraries=libraries
)
ext_modules = [include_ext, orange_ext, orangeom_ext,
orangene_ext, corn_ext, statc_ext]
cmdclass = {"build": orange_build,
"build_ext": pyxtract_build_ext,
"install_lib": orange_install_lib,
"install": orange_install}
if orangeqt_setup:
orangeqt_ext = orangeqt_setup.orangeqt_ext
# Fix relative paths, name etc.
orangeqt_ext.name = "Orange.orangeqt"
orangeqt_ext.sources = ["source/orangeqt/orangeqt.sip"] + \
get_source_files("source/orangeqt", "cpp",
exclude=["canvas3d.cpp", "plot3d.cpp",
"glextensions.cpp"]
)
orangeqt_ext.include_dirs += ["source/orangeqt"]
ext_modules += [orangeqt_ext]
cmdclass["build_pyqt_ext"] = build_pyqt_ext
def all_with_extension(path, extensions):
return [os.path.join(path, "*.%s"%extension) for extension in extensions]
# TODO: Simply replace with include_package_data = True and list the missing files in MANIFEST.in?
#Marko 20120128: Removed "doc/style.css", "doc/widgets/*/*.*" from the package
def get_package_data():
package_data = {
"Orange":
["orangerc.cfg" ] +\
all_with_extension(path="datasets", extensions=("tab", "csv", "basket")) +\
all_with_extension(path="testing/regression/tests_20", extensions=("net", "tab", "basket", "csv")),
"Orange.OrangeCanvas": ["icons/*.png", "icons/*.svg", "icons/*.ico"
"orngCanvas.pyw", "WidgetTabs.txt"],
"Orange.OrangeCanvas.styles": ["*.qss", "orange/*.svg"],
"Orange.OrangeCanvas.application.tutorials": ["*.ows"],
"Orange.OrangeWidgets": ["icons/*.png", "icons/backgrounds/*.png",
"report/index.html"],
"Orange.OrangeWidgets.Associate": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Classify": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Data": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Evaluate": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Prototypes": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Regression": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Unsupervised": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Visualize": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Visualize Qt": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.Utilities": ["icons/*.png", "icons/*.svg"],
"Orange.OrangeWidgets.plot": ["*.gs", "*.vs"],
"Orange.OrangeWidgets.plot.primitives": ["*.obj"],
}
return package_data
def git_version():
"""Return the git revision as a string.
Copied from numpy setup.py
"""
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename='Orange/version.py'):
# Copied from numpy setup.py
cnt = """
# THIS FILE IS GENERATED FROM ORANGE SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
short_version += ".dev"
"""
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('Orange/version.py'):
# must be a source distribution, use existing version file
version = imp.load_source("Orange.version", "Orange/version.py")
GIT_REVISION = version.git_revision
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
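# Illustrative note (not part of the original build script): for a git
# checkout with ISRELEASED = False, write_version_py() emits an
# Orange/version.py roughly equivalent to
#
#     short_version = '2.7.8'
#     version = '2.7.8'
#     full_version = '2.7.8.dev-abc1234'   # hypothetical short revision
#     git_revision = 'abc1234...'          # full SHA from `git rev-parse HEAD`
#     release = False
#     if not release:
#         version = full_version
#         short_version += ".dev"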
PACKAGES = find_packages()
PACKAGE_DATA = get_package_data()
SETUP_REQUIRES = (
'setuptools',
)
# If you change the requirements, check whether all add-ons still work!
INSTALL_REQUIRES = (
'setuptools',
'numpy',
'scipy',
)
EXTRAS_REQUIRE = {
'GUI': (
'PyQt4',
'PyQwt',
),
'reST': (
'numpydoc',
),
}
DEPENDENCY_LINKS = (
)
ENTRY_POINTS = {
'gui_scripts': (
'orange-canvas = Orange.OrangeCanvas.main:main',
),
'orange.canvas.help': (
'intersphinx = Orange.OrangeWidgets:intersphinx',
)
}
def setup_package():
write_version_py()
setup(
name = NAME,
version = VERSION,
description = DESCRIPTION,
long_description = LONG_DESCRIPTION,
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = URL,
download_url = DOWNLOAD_URL,
license = LICENSE,
keywords = KEYWORDS,
classifiers = CLASSIFIERS,
packages = PACKAGES,
package_data = PACKAGE_DATA,
setup_requires = SETUP_REQUIRES,
extras_require = EXTRAS_REQUIRE,
install_requires = INSTALL_REQUIRES,
dependency_links = DEPENDENCY_LINKS,
entry_points = ENTRY_POINTS,
include_package_data = True,
zip_safe = False,
test_suite = 'Orange.testing.unit.tests.test_suite',
cmdclass = cmdclass,
ext_modules = ext_modules,
)
if __name__ == '__main__':
setup_package()
| gpl-3.0 |
DistrictDataLabs/yellowbrick | yellowbrick/datasaurus.py | 1 | 33969 | # yellowbrick.datasaurus
# Plots a Datasaurus Quartet as an illustration of the importance of visualization.
#
# Author: Larry Gray
# Created: Wed Jun 20 15:17:35 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: datasaurus.py [e49d780] [email protected] $
"""
Plots a Datasaurus Quartet as an illustration of the importance of visualization.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from yellowbrick.bestfit import draw_best_fit
from yellowbrick.style import get_color_cycle
##########################################################################
## DATASAURUS Data Arrays
##########################################################################
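# Each entry in DATASAURUS is a 2 x N array: row 0 holds the x coordinates
# and row 1 the y coordinates of one dataset (see how datasaurus() below
# unpacks arr[0] and arr[1]).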
DATASAURUS = [
np.array(
[
[
55.3846,
51.5385,
46.1538,
42.8205,
40.7692,
38.7179,
35.641,
33.0769,
28.9744,
26.1538,
23.0769,
22.3077,
22.3077,
23.3333,
25.8974,
29.4872,
32.8205,
35.3846,
40.2564,
44.1026,
46.6667,
50.0,
53.0769,
56.6667,
59.2308,
61.2821,
61.5385,
61.7949,
57.4359,
54.8718,
52.5641,
48.2051,
49.4872,
51.0256,
45.3846,
42.8205,
38.7179,
35.1282,
32.5641,
30.0,
33.5897,
36.6667,
38.2051,
29.7436,
29.7436,
30.0,
32.0513,
35.8974,
41.0256,
44.1026,
47.1795,
49.4872,
51.5385,
53.5897,
55.1282,
56.6667,
59.2308,
62.3077,
64.8718,
67.9487,
70.5128,
71.5385,
71.5385,
69.4872,
46.9231,
48.2051,
50.0,
53.0769,
55.3846,
56.6667,
56.1538,
53.8462,
51.2821,
50.0,
47.9487,
29.7436,
29.7436,
31.2821,
57.9487,
61.7949,
64.8718,
68.4615,
70.7692,
72.0513,
73.8462,
75.1282,
76.6667,
77.6923,
79.7436,
81.7949,
83.3333,
85.1282,
86.4103,
87.9487,
89.4872,
93.3333,
95.3846,
98.2051,
56.6667,
59.2308,
60.7692,
63.0769,
64.1026,
64.359,
74.359,
71.2821,
67.9487,
65.8974,
63.0769,
61.2821,
58.7179,
55.1282,
52.3077,
49.7436,
47.4359,
44.8718,
48.7179,
51.2821,
54.1026,
56.1538,
52.0513,
48.7179,
47.1795,
46.1538,
50.5128,
53.8462,
57.4359,
60.0,
64.1026,
66.9231,
71.2821,
74.359,
78.2051,
67.9487,
68.4615,
68.2051,
37.6923,
39.4872,
91.2821,
50.0,
47.9487,
44.1026,
],
[
97.1795,
96.0256,
94.4872,
91.4103,
88.3333,
84.8718,
79.8718,
77.5641,
74.4872,
71.4103,
66.4103,
61.7949,
57.1795,
52.9487,
51.0256,
51.0256,
51.0256,
51.4103,
51.4103,
52.9487,
54.1026,
55.2564,
55.641,
56.0256,
57.9487,
62.1795,
66.4103,
69.1026,
55.2564,
49.8718,
46.0256,
38.3333,
42.1795,
44.1026,
36.4103,
32.5641,
31.4103,
30.2564,
32.1795,
36.7949,
41.4103,
45.641,
49.1026,
36.0256,
32.1795,
29.1026,
26.7949,
25.2564,
25.2564,
25.641,
28.718,
31.4103,
34.8718,
37.5641,
40.641,
42.1795,
44.4872,
46.0256,
46.7949,
47.9487,
53.718,
60.641,
64.4872,
69.4872,
79.8718,
84.1026,
85.2564,
85.2564,
86.0256,
86.0256,
82.9487,
80.641,
78.718,
78.718,
77.5641,
59.8718,
62.1795,
62.5641,
99.4872,
99.1026,
97.5641,
94.1026,
91.0256,
86.4103,
83.3333,
79.1026,
75.2564,
71.4103,
66.7949,
60.2564,
55.2564,
51.4103,
47.5641,
46.0256,
42.5641,
39.8718,
36.7949,
33.718,
40.641,
38.3333,
33.718,
29.1026,
25.2564,
24.1026,
22.9487,
22.9487,
22.1795,
20.2564,
19.1026,
19.1026,
18.3333,
18.3333,
18.3333,
17.5641,
16.0256,
13.718,
14.8718,
14.8718,
14.8718,
14.1026,
12.5641,
11.0256,
9.8718,
6.0256,
9.4872,
10.2564,
10.2564,
10.641,
10.641,
10.641,
10.641,
10.641,
10.641,
8.718,
5.2564,
2.9487,
25.7692,
25.3846,
41.5385,
95.7692,
95.0,
92.6923,
],
]
),
np.array(
[
[
51.20389114,
58.9744699,
51.87207267,
48.17993079,
41.6832004,
37.8904155,
39.54897369,
39.64957388,
34.75059705,
27.56083529,
24.63553998,
20.95946481,
20.68914905,
19.28820474,
20.02450057,
35.469523,
36.89432765,
39.05554978,
46.95708015,
37.31045274,
40.009672,
48.01438668,
53.70377593,
63.06749989,
62.04803251,
59.83996671,
55.16094182,
61.27978658,
60.83491753,
61.52059065,
36.91654386,
38.50219967,
48.66437073,
50.2852524,
42.27633267,
54.03177562,
37.32935526,
41.38952255,
40.07466666,
35.34968062,
34.76370042,
37.02662945,
36.45556953,
35.53766421,
20.40894789,
23.49571047,
29.55754336,
33.00823391,
53.98039918,
52.2343086,
59.50307661,
41.16378107,
48.99304012,
59.26928032,
45.469177,
62.69126654,
73.42867087,
70.84642611,
71.53901985,
67.62086589,
72.47095256,
64.81223756,
60.85367987,
67.78949616,
41.60955727,
53.00302532,
54.71417106,
44.29166872,
49.19172196,
53.10138178,
51.59984815,
54.37972195,
46.4807681,
53.17465627,
45.27200294,
36.03340215,
28.27119417,
25.05480608,
64.758887,
63.14452748,
50.42467869,
70.64499626,
63.14904908,
62.82402452,
70.23686951,
70.04273524,
72.57062345,
75.13071604,
83.29390573,
79.66426228,
88.43210253,
89.11555901,
89.09219763,
91.72600577,
91.73553876,
91.50788817,
88.2390019,
88.5305192,
55.36516034,
62.56025887,
58.00666912,
55.06711799,
61.61477596,
68.54314354,
77.70610965,
68.453046,
68.25720644,
70.25547467,
65.04432528,
60.09224661,
52.99202897,
50.14462898,
46.50861419,
43.80703196,
57.81785469,
50.94049266,
63.49732308,
50.01648295,
58.63676508,
54.73028909,
65.8755478,
57.06098271,
46.81990795,
38.35939487,
47.31541578,
55.05191654,
50.51596026,
49.67741465,
67.28065952,
66.17301826,
61.08854414,
66.05308577,
72.66998927,
61.5034725,
68.99502863,
78.24991617,
36.48198057,
50.96774838,
91.19105361,
55.86376849,
49.2805948,
43.36850154,
],
[
83.33977661,
85.49981761,
85.82973763,
85.04511674,
84.0179406,
82.567493,
80.81260177,
82.66453387,
80.01109099,
72.84782559,
71.61071483,
66.04149838,
62.72130521,
62.06305936,
61.34262387,
43.11588495,
47.70655597,
55.54697371,
65.24040739,
45.2587509,
60.98658251,
65.71281959,
66.38948204,
64.03500046,
63.84586325,
64.47676444,
65.23730817,
65.7664025,
64.60376971,
64.79185504,
41.09524744,
41.56715562,
30.68066685,
30.33792211,
34.52763612,
29.67234831,
39.60204231,
37.29605623,
34.6236852,
47.14107313,
47.62479992,
44.46229305,
40.79184303,
48.72938687,
32.20303042,
25.32246815,
21.36477746,
15.98507146,
29.35098671,
29.71167299,
30.66967394,
34.31575825,
32.03035884,
29.64070177,
33.83119273,
30.29037383,
48.57785513,
52.28225333,
45.52180616,
38.00655847,
51.12213482,
62.81091559,
65.49914703,
61.36370155,
83.84868656,
84.6747986,
84.04312807,
82.90944121,
85.87622912,
84.54765869,
84.81982149,
84.24035555,
83.51821167,
84.26056799,
85.23707942,
53.37168776,
72.84023126,
71.54859792,
82.31522364,
85.23669633,
85.17474759,
82.43091876,
83.94685535,
84.96618595,
82.17115106,
80.38502135,
80.97121843,
79.98409314,
70.77843179,
73.93230972,
64.624247,
64.00150664,
57.76819305,
52.62335326,
48.97021089,
53.31265209,
31.47743488,
30.47603101,
30.44585028,
30.44713567,
30.2537213,
29.0115352,
29.99439119,
35.65783217,
20.30426019,
13.03552859,
12.38463915,
13.25038497,
11.00084148,
11.87211171,
9.90666848,
12.21154309,
11.20713449,
11.31894489,
10.94514243,
9.69154713,
11.91406917,
11.93385209,
11.97472107,
11.41288267,
11.73243636,
9.92056085,
10.49465268,
13.43132262,
12.85345178,
11.94998862,
9.76559162,
10.38313251,
14.12865153,
12.03791702,
10.08453441,
13.38022601,
15.23422594,
10.82841448,
13.99431053,
17.88324091,
15.16276009,
29.67977429,
46.67434284,
85.33648676,
84.04882283,
84.3321772,
],
]
),
np.array(
[
[
58.21360826,
58.19605369,
58.71823072,
57.27837287,
58.08202049,
57.48944777,
28.08874132,
28.08546821,
28.08727305,
27.57802522,
27.77991911,
28.58899981,
28.7391415,
27.02460324,
28.8013367,
27.18646384,
29.2851466,
39.4029453,
28.81132844,
34.30395791,
29.60276098,
49.11615686,
39.61754583,
43.23308466,
64.89278794,
62.49014932,
68.98808443,
62.10561863,
32.46184674,
41.32720065,
44.00714993,
44.07406069,
44.00131524,
45.00630045,
44.44384061,
42.1787134,
44.04456562,
41.64045402,
41.93833001,
44.05392751,
39.20671933,
28.70444923,
31.7086629,
42.81171147,
43.30061489,
40.39863291,
40.43569158,
40.93654667,
39.66157367,
40.89925917,
41.96861683,
40.38340582,
56.53812645,
52.97069128,
54.62095259,
65.09904439,
63.05599091,
70.96013623,
69.89581924,
70.59589286,
69.64702143,
77.39298249,
64.40078719,
63.86895983,
56.59442132,
56.53133729,
59.65215837,
56.6365087,
58.672288,
58.22161273,
57.91466448,
55.31550906,
54.57572859,
54.41309365,
55.0745059,
29.43296052,
29.42268607,
29.00561416,
58.46183859,
57.99780474,
57.54947408,
59.52992846,
58.24939106,
58.02451401,
58.38212449,
62.56675904,
72.17582431,
79.47276157,
80.35770088,
78.75723614,
82.54023959,
86.43589719,
79.48868442,
81.53042032,
79.18678857,
77.89905795,
75.13071421,
76.05801375,
57.61467439,
56.17139753,
66.2878906,
67.88171962,
64.0280813,
77.49665175,
77.63465176,
77.86372643,
77.33815817,
76.18041653,
77.25265109,
77.41337528,
76.7318494,
49.47110541,
42.47653994,
43.59511586,
50.33996967,
40.74898026,
38.38652558,
38.40401521,
38.76427889,
41.47014233,
47.15540481,
39.58256675,
41.74024382,
39.31187189,
41.67984769,
39.08746445,
41.48150286,
77.60608655,
75.98266152,
76.94575724,
77.54372007,
77.58473984,
76.82230426,
77.34857166,
77.57315269,
77.97261068,
41.52891976,
43.7225508,
79.32607818,
56.66397408,
57.82178923,
58.2431719,
],
[
91.88189151,
92.21498865,
90.31053209,
89.90760672,
92.00814501,
88.08528556,
63.51079443,
63.59019695,
63.12328281,
62.82103866,
63.51814752,
63.02408057,
62.72086389,
62.90185886,
63.38904039,
63.55872965,
63.38360583,
51.1508572,
61.35785406,
56.54212591,
60.15734672,
63.66000062,
62.92518796,
63.16521872,
65.81417676,
74.58428961,
63.2321473,
75.99087076,
62.88190292,
49.07025127,
46.44967378,
34.55320389,
33.90420735,
38.29901955,
36.0190833,
26.49211948,
35.66223828,
27.09309542,
24.99152298,
33.55639249,
51.5337157,
61.7775254,
58.83775437,
30.02044842,
31.5264262,
16.34700838,
20.23267068,
16.91300484,
15.60935558,
20.79852895,
26.4970726,
21.39122552,
32.44424547,
29.04019669,
30.34452445,
27.24155756,
29.70909567,
41.25950129,
43.45375927,
41.96474387,
44.04444502,
63.37145906,
67.44871845,
70.21373883,
86.92700622,
87.49981107,
87.80946159,
85.63749556,
90.07716031,
90.41101877,
89.95380277,
80.25186069,
77.53628847,
78.22908659,
79.81754642,
60.80177654,
63.06846482,
63.39075133,
90.26532639,
92.15990861,
90.74890656,
88.32727415,
92.12968148,
91.69442117,
90.55347607,
77.74393476,
63.12892942,
63.40868612,
63.29543754,
53.33262001,
56.54105229,
59.79276181,
53.65167426,
56.02536457,
53.23479185,
51.82245833,
23.37244197,
16.38374969,
33.82244765,
32.11798877,
26.11710975,
24.23601841,
27.67268551,
14.94852356,
14.46185393,
14.61067765,
15.89005466,
15.91257375,
15.15151702,
15.22192798,
16.21684614,
25.06301931,
18.33847356,
19.99420098,
26.47139661,
16.18214166,
14.58021515,
14.45194845,
14.36559047,
17.27803344,
22.37793253,
17.64845284,
17.82932431,
15.64071697,
17.74591901,
15.12230394,
18.04743744,
15.16287254,
16.30692238,
15.85847833,
15.25394915,
15.83003939,
15.59516532,
15.77452924,
14.78064583,
14.95569875,
24.91642519,
19.0773278,
52.90039129,
87.94012501,
90.69316655,
92.10432787,
],
]
),
np.array(
[
[
51.14791671,
50.51712581,
50.2074802,
50.06948192,
50.56284634,
50.2885278,
25.58347508,
25.48358339,
25.4435257,
25.56511342,
25.92884427,
27.55147826,
27.53046637,
27.09557036,
27.43924961,
27.87826426,
27.33886892,
27.67840297,
52.63565768,
52.02521411,
52.88116479,
52.95260731,
52.52055249,
52.34282206,
51.92759021,
52.71377449,
50.44380279,
50.21669503,
52.18418011,
52.79209735,
52.58971986,
52.02884867,
52.72924658,
52.88431329,
52.50930089,
50.86268433,
50.89149225,
25.8551276,
26.02564455,
27.89317272,
27.63996794,
27.8926589,
52.79773294,
27.58063881,
26.49139853,
25.98531782,
26.20141928,
25.85756947,
50.70468436,
50.81197535,
50.56484556,
50.93930391,
50.45885484,
52.90136407,
52.68495344,
52.50008894,
51.83563726,
76.9954121,
77.31060048,
77.92604434,
77.25438834,
76.2431578,
77.08448437,
75.2280532,
50.65835477,
50.20336581,
50.9295477,
50.17867185,
50.42269806,
50.46422483,
50.44927033,
49.92838028,
50.48801364,
49.96490538,
50.75210826,
27.42242921,
27.6740834,
27.53739532,
52.26334738,
51.73728166,
75.87096369,
75.24432621,
75.19829529,
75.70104153,
75.47933966,
75.19456687,
74.82025396,
75.16434049,
75.26335555,
77.75641893,
77.95443505,
77.08333777,
76.06355025,
77.68201632,
76.87808198,
76.94850272,
77.86405471,
75.77145009,
52.33156913,
52.59281837,
50.47704772,
75.29647509,
75.57395413,
75.40052716,
75.87099084,
75.60588476,
75.89557705,
75.7465632,
75.14234148,
50.66177956,
50.69985064,
50.91894087,
50.72525854,
51.26387123,
51.25091965,
50.78515721,
50.50139658,
50.73367454,
50.71137854,
50.8127449,
51.01423295,
50.35352141,
50.43552957,
50.63098196,
51.0668072,
50.79235473,
50.55127806,
50.55975806,
75.32597855,
75.04472578,
75.28708772,
75.23996998,
75.1524592,
75.96184009,
75.44806251,
75.75938382,
50.3782623,
50.53363501,
77.50090732,
50.69112419,
49.99039495,
50.12718203,
],
[
90.86741233,
89.10239459,
85.4600474,
83.05766953,
82.93782178,
82.97525357,
82.91489113,
82.92908498,
82.8742005,
82.92409777,
82.82118411,
51.48738653,
51.41484656,
52.07679944,
51.71207905,
50.70890793,
51.65304675,
51.18198917,
51.41855226,
52.12301105,
50.62155476,
50.07473901,
51.5024421,
51.86195209,
52.25779061,
51.19794432,
82.94182882,
83.75234297,
51.97525067,
51.07339565,
51.3380902,
52.1768375,
51.20176505,
50.44143545,
51.41620515,
17.14563109,
17.14132373,
17.08190869,
16.92501353,
50.66196341,
51.39909748,
50.79528152,
50.68603709,
51.52476126,
17.40539097,
17.20372213,
17.09382391,
17.11384266,
17.02374454,
17.11492526,
17.07777732,
16.98102188,
17.03857897,
50.69056272,
51.29446922,
51.59435617,
52.33576553,
52.04552865,
51.74673004,
50.31866042,
51.46182482,
52.12368985,
51.9671367,
82.98566202,
83.11447934,
82.98265686,
82.84604113,
83.18462233,
82.90990147,
82.93532841,
83.96992038,
82.99366549,
83.09951912,
83.7083177,
82.9019501,
51.43887623,
51.30411215,
51.59365408,
94.24932783,
92.97911753,
88.38644174,
83.90349738,
83.46230334,
82.91945886,
82.88405139,
82.93211578,
82.96238879,
83.03499717,
82.9452793,
51.15177033,
50.47557897,
52.15779927,
52.10465206,
51.16563781,
51.8675623,
51.90751654,
49.66254553,
17.11125121,
51.87886035,
51.39159152,
17.04828941,
17.01565319,
17.06219214,
17.04110689,
17.13489391,
17.06772306,
17.16994971,
17.10571651,
16.75492389,
17.07814052,
17.08518438,
17.14760476,
16.90746981,
17.16234971,
17.24045586,
17.18019648,
17.10577072,
16.99296341,
17.08831585,
16.57271805,
17.22109553,
17.06474308,
17.0651685,
17.07652235,
17.20885971,
17.20421434,
17.08465518,
17.09388377,
15.77189199,
17.00426226,
16.17493491,
17.03184749,
17.0049424,
16.69484223,
17.04514941,
16.94292965,
16.94627981,
17.01958137,
50.16698595,
87.51396042,
83.99735692,
82.99075,
],
]
),
]
def datasaurus():
"""
Creates a 2x2 grid plot of 4 of the Datasaurus Dozen datasets for illustration.
Citation:
Justin Matejka, George Fitzmaurice (2017)
Same Stats, Different Graphs: Generating Datasets with Varied Appearance and
Identical Statistics through Simulated Annealing
CHI 2017 Conference proceedings:
ACM SIGCHI Conference on Human Factors in Computing Systems
"""
_, ((axa, axb), (axc, axd)) = plt.subplots(2, 2, sharex="col", sharey="row")
colors = get_color_cycle()
for arr, ax, color in zip(DATASAURUS, (axa, axb, axc, axd), colors):
x = arr[0]
y = arr[1]
# Draw the points in the scatter plot
ax.scatter(x, y, c=color)
# Set the X and Y limits
ax.set_xlim(0, 100)
ax.set_ylim(0, 110)
# Draw the linear best fit line on the plot
draw_best_fit(x, y, ax, c=color)
return (axa, axb, axc, axd)
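# The quartet's point is that visually distinct clouds share nearly identical
# summary statistics. The helper below is an illustrative addition (not part
# of the original yellowbrick module) that reports those statistics for each
# array in DATASAURUS.
def summary_statistics():
    """
    Return (x_mean, y_mean, x_std, y_std, pearson_r) for each dataset.
    """
    stats = []
    for arr in DATASAURUS:
        x, y = arr[0], arr[1]
        stats.append(
            (x.mean(), y.mean(), x.std(), y.std(), np.corrcoef(x, y)[0, 1])
        )
    return stats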
if __name__ == "__main__":
datasaurus()
plt.show()
| apache-2.0 |
Akshay0724/scikit-learn | sklearn/gaussian_process/gpc.py | 19 | 31639 | """Gaussian processes classification."""
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import erf
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.gaussian_process.kernels \
import RBF, CompoundKernel, ConstantKernel as C
from sklearn.utils.validation import check_X_y, check_is_fitted, check_array
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# Values required for approximating the logistic sigmoid by
# error functions. coefs are obtained via:
# x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf])
# b = logistic(x)
# A = (erf(np.dot(x, self.lambdas)) + 1) / 2
# coefs = lstsq(A, b)[0]
LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis]
COEFS = np.array([-1854.8214151, 3516.89893646, 221.29346712,
128.12323805, -2010.49422654])[:, np.newaxis]
class _BinaryGaussianProcessClassifierLaplace(BaseEstimator):
"""Binary Gaussian process classification based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
``Gaussian Processes for Machine Learning'' (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
X_train_ : array-like, shape = (n_samples, n_features)
Feature values in training data (also required for prediction)
y_train_ : array-like, shape = (n_samples,)
Target values in training data (also required for prediction)
classes_ : array-like, shape = (n_classes,)
Unique class labels.
kernel_ : kernel object
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters
L_ : array-like, shape = (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in X_train_
pi_ : array-like, shape = (n_samples,)
The probabilities of the positive class for the training points
X_train_
W_sr_ : array-like, shape = (n_samples,)
Square root of W, the Hessian of log-likelihood of the latent function
values for the observed labels. Since W is diagonal, only the diagonal
of sqrt(W) is stored.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
Target values, must be binary
Returns
-------
self : returns an instance of self.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") \
* RBF(1.0, length_scale_bounds="fixed")
else:
self.kernel_ = clone(self.kernel)
self.rng = check_random_state(self.random_state)
self.X_train_ = np.copy(X) if self.copy_X_train else X
# Encode class labels and check that it is a binary classification
# problem
label_encoder = LabelEncoder()
self.y_train_ = label_encoder.fit_transform(y)
self.classes_ = label_encoder.classes_
if self.classes_.size > 2:
raise ValueError("%s supports only binary classification. "
"y contains classes %s"
% (self.__class__.__name__, self.classes_))
elif self.classes_.size == 1:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta)
# First optimize starting from theta specified in kernel
optima = [self._constrained_optimization(obj_func,
self.kernel_.theta,
self.kernel_.bounds)]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite.")
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = np.exp(self.rng.uniform(bounds[:, 0],
bounds[:, 1]))
optima.append(
self._constrained_optimization(obj_func, theta_initial,
bounds))
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = \
self.log_marginal_likelihood(self.kernel_.theta)
# Precompute quantities required for predictions which are independent
# of actual query points
K = self.kernel_(self.X_train_)
_, (self.pi_, self.W_sr_, self.L_, _, _) = \
self._posterior_mode(K, return_temporaries=True)
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# As discussed on Section 3.4.2 of GPML, for making hard binary
# decisions, it is enough to compute the MAP of the posterior and
# pass it through the link function
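# (the logistic link is monotonic, so sigmoid(f_star) > 0.5 exactly when
# f_star > 0, which is why thresholding f_star at 0 suffices)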
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4
return np.where(f_star > 0, self.classes_[1], self.classes_[0])
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute ``classes_``.
"""
check_is_fitted(self, ["X_train_", "y_train_", "pi_", "W_sr_", "L_"])
# Based on Algorithm 3.2 of GPML
K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star)
f_star = K_star.T.dot(self.y_train_ - self.pi_) # Line 4
v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5
# Line 6 (compute np.diag(v.T.dot(v)) via einsum)
var_f_star = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v)
# Line 7:
# Approximate \int logistic(z) * N(z | f_star, var_f_star) dz
# Approximation is due to Williams & Barber, "Bayesian Classification
# with Gaussian Processes", Appendix A: Approximate the logistic
# sigmoid by a linear combination of 5 error functions.
# For information on how this integral can be computed see
# blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html
alpha = 1 / (2 * var_f_star)
gamma = LAMBDAS * f_star
integrals = np.sqrt(np.pi / alpha) \
* erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) \
/ (2 * np.sqrt(var_f_star * 2 * np.pi))
pi_star = (COEFS * integrals).sum(axis=0) + .5 * COEFS.sum()
return np.vstack((1 - pi_star, pi_star)).T
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
kernel = self.kernel_.clone_with_theta(theta)
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Compute log-marginal-likelihood Z and also store some temporaries
# which can be reused for computing Z's gradient
Z, (pi, W_sr, L, b, a) = \
self._posterior_mode(K, return_temporaries=True)
if not eval_gradient:
return Z
# Compute gradient based on Algorithm 5.1 of GPML
d_Z = np.empty(theta.shape[0])
# XXX: Get rid of the np.diag() in the next line
R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7
C = solve(L, W_sr[:, np.newaxis] * K) # Line 8
# Line 9: (use einsum to compute np.diag(C.T.dot(C)))
s_2 = -0.5 * (np.diag(K) - np.einsum('ij, ij -> j', C, C)) \
* (pi * (1 - pi) * (1 - 2 * pi)) # third derivative
for j in range(d_Z.shape[0]):
C = K_gradient[:, :, j] # Line 11
# Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C)))
s_1 = .5 * a.T.dot(C).dot(a) - .5 * R.T.ravel().dot(C.ravel())
b = C.dot(self.y_train_ - pi) # Line 13
s_3 = b - K.dot(R.dot(b)) # Line 14
d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15
return Z, d_Z
def _posterior_mode(self, K, return_temporaries=False):
"""Mode-finding for binary Laplace GPC and fixed kernel.
This approximates the posterior of the latent function values for given
inputs and target observations with a Gaussian approximation and uses
Newton's iteration to find the mode of this approximation.
"""
# Based on Algorithm 3.1 of GPML
# If warm_start are enabled, we reuse the last solution for the
# posterior mode as initialization; otherwise, we initialize with 0
if self.warm_start and hasattr(self, "f_cached") \
and self.f_cached.shape == self.y_train_.shape:
f = self.f_cached
else:
f = np.zeros_like(self.y_train_, dtype=np.float64)
# Use Newton's iteration method to find mode of Laplace approximation
log_marginal_likelihood = -np.inf
for _ in range(self.max_iter_predict):
# Line 4
pi = 1 / (1 + np.exp(-f))
W = pi * (1 - pi)
# Line 5
W_sr = np.sqrt(W)
W_sr_K = W_sr[:, np.newaxis] * K
B = np.eye(W.shape[0]) + W_sr_K * W_sr
L = cholesky(B, lower=True)
# Line 6
b = W * f + (self.y_train_ - pi)
# Line 7
a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b))
# Line 8
f = K.dot(a)
# Line 10: Compute log marginal likelihood in loop and use as
# convergence criterion
lml = -0.5 * a.T.dot(f) \
- np.log(1 + np.exp(-(self.y_train_ * 2 - 1) * f)).sum() \
- np.log(np.diag(L)).sum()
# Check if we have converged (log marginal likelihood does
# not decrease)
# XXX: more complex convergence criterion
if lml - log_marginal_likelihood < 1e-10:
break
log_marginal_likelihood = lml
self.f_cached = f # Remember solution for later warm-starts
if return_temporaries:
return log_marginal_likelihood, (pi, W_sr, L, b, a)
else:
return log_marginal_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
theta_opt, func_min, convergence_dict = \
fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds)
if convergence_dict["warnflag"] != 0:
warnings.warn("fmin_l_bfgs_b terminated abnormally with the "
" state: %s" % convergence_dict)
elif callable(self.optimizer):
theta_opt, func_min = \
self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError("Unknown optimizer %s." % self.optimizer)
return theta_opt, func_min
class GaussianProcessClassifier(BaseEstimator, ClassifierMixin):
"""Gaussian process classification (GPC) based on Laplace approximation.
The implementation is based on Algorithm 3.1, 3.2, and 5.1 of
Gaussian Processes for Machine Learning (GPML) by Rasmussen and
Williams.
Internally, the Laplace approximation is used for approximating the
non-Gaussian posterior by a Gaussian.
Currently, the implementation is restricted to using the logistic link
function. For multi-class classification, several binary one-versus rest
classifiers are fitted. Note that this class thus does not implement
a true multi-class Laplace approximation.
Parameters
----------
kernel : kernel object
The kernel specifying the covariance function of the GP. If None is
passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that
the kernel's hyperparameters are optimized during fitting.
optimizer : string or callable, optional (default: "fmin_l_bfgs_b")
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func' is the objective function to be maximized, which
# takes the hyperparameters theta as parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are::
'fmin_l_bfgs_b'
n_restarts_optimizer : int, optional (default: 0)
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that n_restarts_optimizer=0 implies that one
run is performed.
max_iter_predict : int, optional (default: 100)
The maximum number of iterations in Newton's method for approximating
the posterior during predict. Smaller values will reduce computation
time at the cost of worse results.
warm_start : bool, optional (default: False)
If warm-starts are enabled, the solution of the last Newton iteration
on the Laplace approximation of the posterior mode is used as
initialization for the next call of _posterior_mode(). This can speed
up convergence when _posterior_mode is called several times on similar
problems as in hyperparameter optimization.
copy_X_train : bool, optional (default: True)
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
multi_class : string, default : "one_vs_rest"
Specifies how multi-class classification problems are handled.
Supported are "one_vs_rest" and "one_vs_one". In "one_vs_rest",
one binary Gaussian process classifier is fitted for each class, which
is trained to separate this class from the rest. In "one_vs_one", one
binary Gaussian process classifier is fitted for each pair of classes,
which is trained to separate these two classes. The predictions of
these binary predictors are combined into multi-class predictions.
Note that "one_vs_one" does not support predicting probability
estimates.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is
useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
used. Thus for n_jobs = -2, all CPUs but one are used.
Attributes
----------
kernel_ : kernel object
The kernel used for prediction. In case of binary classification,
the structure of the kernel is the same as the one passed as parameter
but with optimized hyperparameters. In case of multi-class
classification, a CompoundKernel is returned which consists of the
different kernels used in the one-versus-rest classifiers.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``
classes_ : array-like, shape = (n_classes,)
Unique class labels.
n_classes_ : int
The number of classes in the training data
.. versionadded:: 0.18
"""
def __init__(self, kernel=None, optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0, max_iter_predict=100,
warm_start=False, copy_X_train=True, random_state=None,
multi_class="one_vs_rest", n_jobs=1):
self.kernel = kernel
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.max_iter_predict = max_iter_predict
self.warm_start = warm_start
self.copy_X_train = copy_X_train
self.random_state = random_state
self.multi_class = multi_class
self.n_jobs = n_jobs
def fit(self, X, y):
"""Fit Gaussian process classification model
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Training data
y : array-like, shape = (n_samples,)
            Target values (class labels)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=False)
self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace(
self.kernel, self.optimizer, self.n_restarts_optimizer,
self.max_iter_predict, self.warm_start, self.copy_X_train,
self.random_state)
self.classes_ = np.unique(y)
self.n_classes_ = self.classes_.size
if self.n_classes_ == 1:
raise ValueError("GaussianProcessClassifier requires 2 or more "
"distinct classes. Only class %s present."
% self.classes_[0])
if self.n_classes_ > 2:
if self.multi_class == "one_vs_rest":
self.base_estimator_ = \
OneVsRestClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
elif self.multi_class == "one_vs_one":
self.base_estimator_ = \
OneVsOneClassifier(self.base_estimator_,
n_jobs=self.n_jobs)
else:
raise ValueError("Unknown multi-class mode %s"
% self.multi_class)
self.base_estimator_.fit(X, y)
if self.n_classes_ > 2:
self.log_marginal_likelihood_value_ = np.mean(
[estimator.log_marginal_likelihood()
for estimator in self.base_estimator_.estimators_])
else:
self.log_marginal_likelihood_value_ = \
self.base_estimator_.log_marginal_likelihood()
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array, shape = (n_samples,)
Predicted target values for X, values are from ``classes_``
"""
check_is_fitted(self, ["classes_", "n_classes_"])
X = check_array(X)
return self.base_estimator_.predict(X)
def predict_proba(self, X):
"""Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Returns
-------
C : array-like, shape = (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if self.n_classes_ > 2 and self.multi_class == "one_vs_one":
raise ValueError("one_vs_one multi-class mode does not support "
"predicting probability estimates. Use "
"one_vs_rest mode instead.")
X = check_array(X)
return self.base_estimator_.predict_proba(X)
@property
def kernel_(self):
if self.n_classes_ == 2:
return self.base_estimator_.kernel_
else:
return CompoundKernel(
[estimator.kernel_
for estimator in self.base_estimator_.estimators_])
def log_marginal_likelihood(self, theta=None, eval_gradient=False):
"""Returns log-marginal likelihood of theta for training data.
In the case of multi-class classification, the mean log-marginal
        likelihood of the one-versus-rest classifiers is returned.
Parameters
----------
        theta : array-like, shape = (n_kernel_params,) or None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. In the case of multi-class classification, theta may
be the hyperparameters of the compound kernel or of an individual
            kernel. In the latter case, all individual kernels get assigned the
same theta values. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default: False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. Note that gradient computation is not supported
for non-binary classification. If True, theta must not be None.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : array, shape = (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
check_is_fitted(self, ["classes_", "n_classes_"])
if theta is None:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
theta = np.asarray(theta)
if self.n_classes_ == 2:
return self.base_estimator_.log_marginal_likelihood(
theta, eval_gradient)
else:
if eval_gradient:
raise NotImplementedError(
"Gradient of log-marginal-likelihood not implemented for "
"multi-class GPC.")
estimators = self.base_estimator_.estimators_
n_dims = estimators[0].kernel_.n_dims
if theta.shape[0] == n_dims: # use same theta for all sub-kernels
return np.mean(
[estimator.log_marginal_likelihood(theta)
for i, estimator in enumerate(estimators)])
elif theta.shape[0] == n_dims * self.classes_.shape[0]:
# theta for compound kernel
return np.mean(
[estimator.log_marginal_likelihood(
theta[n_dims * i:n_dims * (i + 1)])
for i, estimator in enumerate(estimators)])
else:
raise ValueError("Shape of theta must be either %d or %d. "
"Obtained theta with shape %d."
% (n_dims, n_dims * self.classes_.shape[0],
theta.shape[0]))
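# A minimal usage sketch of the classifier documented above, kept outside the
# class body. It assumes the public import paths
# sklearn.gaussian_process.GaussianProcessClassifier and
# sklearn.gaussian_process.kernels.RBF; the iris dataset is only an example.
def _gpc_usage_sketch():  # pragma: no cover - illustrative only
    from sklearn.datasets import load_iris
    from sklearn.gaussian_process import GaussianProcessClassifier
    from sklearn.gaussian_process.kernels import RBF
    X, y = load_iris(return_X_y=True)
    clf = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0),
                                    multi_class="one_vs_rest", n_jobs=1)
    clf.fit(X, y)
    # class probabilities for the first two samples, columns ordered as classes_
    proba = clf.predict_proba(X[:2])
    # mean log-marginal likelihood of the one-versus-rest sub-models
    lml = clf.log_marginal_likelihood_value_
    return proba, lml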
| bsd-3-clause |
mfouesneau/pyphot | pyphot/astropy/sandbox.py | 1 | 65998 | """
Sandbox of new developments
Use at your own risks
Photometric package using Astropy Units
=======================================
Defines a Filter class and associated functions to extract photometry.
This also include functions to keep libraries up to date
.. note::
integrations are done using :func:`trapz`
    Why not Simpson's rule? Simpson's rule fits a quadratic through each
    sequence of 3 points. When filters have sharp edges, the errors due to
    this "interpolation" are extremely large in comparison to the
    uncertainties induced by trapezoidal integration.
"""
from __future__ import print_function, division
import os
from functools import wraps
import numpy as np
import tables
from scipy.integrate import trapz
from .simpletable import SimpleTable
from .vega import Vega
from .config import libsdir
# from .licks import LickIndex, LickLibrary
# directories
# __default__ = libsdir + '/filters.hd5'
# __default__ = libsdir + '/filters'
__default__ = libsdir + '/new_filters.hd5'
__default_lick__ = libsdir + '/licks.dat'
from astropy.units import Unit
from astropy import constants
class Constants(object):
""" A namespace for constants """
# Planck's constant in erg * sec
h = constants.h.to('erg * s')
# Speed of light in cm/s
c = constants.c.to('AA/s')
def hasUnit(val):
""" Check is an object has units """
return hasattr(val, 'unit') or hasattr(val, 'units')
class set_method_default_units(object):
""" Decorator for classmethods that makes sure that
the inputs of slamb, sflux are in given units
expects the decorated method to be defined as
>> def methodname(self, lamb, flux)
"""
def __init__(self, wavelength_unit, flux_unit, output_unit=None):
self.wavelength_unit = Unit(wavelength_unit)
self.flux_unit = Unit(flux_unit)
self.output_unit = output_unit
@classmethod
def force_units(cls, value, unit):
if unit is None:
return value
try:
return value.to(unit)
except AttributeError:
            msg = 'Warning: assuming {0:s} units for unitless object.'
print(msg.format(str(unit)))
return value * unit
def __call__(self, func):
@wraps(func)
def wrapper(filter_, slamb, sflux, *args, **kwargs):
_slamb = set_method_default_units.force_units(slamb,
self.wavelength_unit)
_sflux = set_method_default_units.force_units(sflux,
self.flux_unit)
output = func(filter_, _slamb, _sflux, *args, **kwargs)
return set_method_default_units.force_units(output,
self.output_unit)
return wrapper
def _drop_units(q):
""" Drop the unit definition silently """
try:
return q.value
except AttributeError:
try:
            return q.magnitude  # pint-like Quantity fallback
except AttributeError:
return q
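# Minimal sketch of the decorator contract defined above, using a stand-in
# class that is not part of this module: the decorated method receives slamb
# and sflux converted to (or assumed to be in) the declared units.
class _UnitsDecoratorDemo(object):
    """ Illustrative stand-in only """
    @set_method_default_units('AA', 'erg*s**-1*cm**-2*AA**-1')
    def integrated_flux(self, slamb, sflux):
        # slamb arrives in AA and sflux in erg/s/cm2/AA
        return np.trapz(_drop_units(sflux), _drop_units(slamb))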
class UnitFilter(object):
""" Evolution of Filter that makes sure the input spectra and output fluxes
have units to avoid mis-interpretation.
Note the usual (non SI) units of flux definitions:
flam = erg/s/cm**2/AA
fnu = erg/s/cm**2/Hz
photflam = photon/s/cm**2/AA
photnu = photon/s/cm**2/Hz
Define a filter by its name, wavelength and transmission
The type of detector (energy or photon counter) can be specified for
adapting calculations. (default: photon)
Attributes
----------
name: str
name of the filter
cl: float
central wavelength of the filter
norm: float
normalization factor of the filter
lpivot: float
pivot wavelength of the filter
wavelength: ndarray
wavelength sequence defining the filter transmission curve
transmit: ndarray
transmission curve of the filter
dtype: str
detector type, either "photon" or "energy" counter
unit: str
wavelength units
"""
def __init__(self, wavelength, transmit, name='', dtype="photon",
unit=None):
"""Constructor"""
self.name = name
self.set_dtype(dtype)
try: # get units from the inputs
self._wavelength = wavelength.value
unit = str(wavelength.unit)
except AttributeError:
self._wavelength = wavelength
self.set_wavelength_unit(unit)
# make sure input data are ordered and cleaned of weird values.
idx = np.argsort(self._wavelength)
self._wavelength = self._wavelength[idx]
self.transmit = np.clip(transmit[idx], 0., np.nanmax(transmit))
self.norm = trapz(self.transmit, self._wavelength)
self._lT = trapz(self._wavelength * self.transmit, self._wavelength)
self._lpivot = self._calculate_lpivot()
if self.norm > 0:
self._cl = self._lT / self.norm
else:
self._cl = 0.
def _calculate_lpivot(self):
if self.transmit.max() <= 0:
return 0.
if 'photon' in self.dtype:
lpivot2 = self._lT / trapz(self.transmit / self._wavelength,
self._wavelength)
else:
lpivot2 = self.norm / trapz(self.transmit / self._wavelength ** 2,
self._wavelength)
return np.sqrt(lpivot2)
def set_wavelength_unit(self, unit):
""" Set the wavelength units """
try: # get units from the inputs
self.wavelength_unit = str(self._wavelength.unit)
except AttributeError:
self.wavelength_unit = unit
def set_dtype(self, dtype):
""" Set the detector type (photon or energy)"""
_d = dtype.lower()
if "phot" in _d:
self.dtype = "photon"
elif "ener" in _d:
self.dtype = "energy"
else:
raise ValueError('Unknown detector type {0}'.format(dtype))
def info(self, show_zeropoints=True):
""" display information about the current filter"""
msg = """Filter object information:
name: {s.name:s}
detector type: {s.dtype:s}
wavelength units: {s.wavelength_unit}
central wavelength: {s.cl:f}
pivot wavelength: {s.lpivot:f}
effective wavelength: {s.leff:f}
photon wavelength: {s.lphot:f}
minimum wavelength: {s.lmin:f}
maximum wavelength: {s.lmax:f}
norm: {s.norm:f}
effective width: {s.width:f}
fullwidth half-max: {s.fwhm:f}
definition contains {s.transmit.size:d} points"""
print(msg.format(s=self).replace('None', 'unknown'))
# zero points only if units
if (self.wavelength_unit is None) or (not show_zeropoints):
return
print("""
Zeropoints
Vega: {s.Vega_zero_mag:f} mag,
{s.Vega_zero_flux},
{s.Vega_zero_Jy}
{s.Vega_zero_photons}
AB: {s.AB_zero_mag:f} mag,
{s.AB_zero_flux},
{s.AB_zero_Jy}
ST: {s.ST_zero_mag:f} mag,
{s.ST_zero_flux},
{s.ST_zero_Jy}
""".format(s=self))
def __repr__(self):
return "Filter: {0:s}, {1:s}".format(self.name, object.__repr__(self))
@property
def wavelength(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._wavelength * Unit(self.wavelength_unit)
else:
return self._wavelength
@property
def lmax(self):
""" Calculated as the last value with a transmission at least 1% of
maximum transmission """
cond = (self.transmit / self.transmit.max()) > 1./100
return max(self.wavelength[cond])
@property
def lmin(self):
""" Calculate das the first value with a transmission at least 1% of
maximum transmission """
cond = (self.transmit / self.transmit.max()) > 1./100
return min(self.wavelength[cond])
@property
def width(self):
""" Effective width
Equivalent to the horizontal size of a rectangle with height equal
        to maximum transmission and with the same area as the one covered by
the filter transmission curve.
W = int(T dlamb) / max(T)
"""
return (self.norm / max(self.transmit)) * Unit(self.wavelength_unit)
@property
def fwhm(self):
""" the difference between the two wavelengths for which filter
transmission is half maximum
        .. note::
This calculation is not exact but rounded to the nearest passband
data points
"""
vals = self.transmit / self.transmit.max() - 0.5
zero_crossings = np.where(np.diff(np.sign(vals)))[0]
lambs = self.wavelength[zero_crossings]
return np.diff(lambs)[0]
@property
def lpivot(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._lpivot * Unit(self.wavelength_unit)
else:
return self._lpivot
@property
def cl(self):
""" Unitwise wavelength definition """
if self.wavelength_unit is not None:
return self._cl * Unit(self.wavelength_unit)
else:
return self._cl
@property
def leff(self):
""" Unitwise Effective wavelength
leff = int (lamb * T * Vega dlamb) / int(T * Vega dlamb)
"""
with Vega() as v:
s = self.reinterp(v.wavelength)
w = s._wavelength
if s.transmit.max() > 0:
leff = np.trapz(w * s.transmit * v.flux.value, w, axis=-1)
leff /= np.trapz(s.transmit * v.flux.value, w, axis=-1)
else:
leff = float('nan')
if s.wavelength_unit is not None:
leff = leff * Unit(s.wavelength_unit)
if self.wavelength_unit is not None:
return leff.to(self.wavelength_unit)
return leff
else:
return leff
@classmethod
def _validate_sflux(cls, slamb, sflux):
""" clean data for inf in input """
_sflux = _drop_units(sflux)
_slamb = _drop_units(slamb)
if True in np.isinf(sflux):
indinf = np.where(np.isinf(_sflux))
indfin = np.where(np.isfinite(_sflux))
_sflux[indinf] = np.interp(_slamb[indinf], _slamb[indfin],
_sflux[indfin], left=0, right=0)
try:
_unit = str(sflux.unit)
return _sflux * Unit(_unit)
except AttributeError:
return _sflux
@classmethod
def _get_zero_like(cls, sflux, axis=-1):
"""return a zero value corresponding to a flux calculation on sflux"""
# _sflux = _drop_units(sflux)
# shape = _sflux.shape
# if axis < 0:
# axis = len(shape) + axis
# newshape = shape[:axis] + shape[axis + 1:]
# return np.zeros(newshape, _sflux.dtype)
return np.zeros_like(sflux).sum(axis=axis)
@property
def lphot(self):
""" Photon distribution based effective wavelength. Defined as
lphot = int(lamb ** 2 * T * Vega dlamb) / int(lamb * T * Vega dlamb)
which we calculate as
lphot = get_flux(lamb * vega) / get_flux(vega)
"""
if self.wavelength_unit is None:
raise AttributeError('Needs wavelength units')
with Vega() as v:
wave = v.wavelength.value
# Cheating units to avoid making a new filter
f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)
f_lamb_vega = self.get_flux(v.wavelength, wave * v.flux, axis=-1)
f_lamb2_vega = self.get_flux(v.wavelength, wave ** 2 * v.flux,
axis=-1)
if 'photon' in self.dtype:
lphot = (f_lamb_vega / f_vega)
else:
lphot = f_lamb2_vega / f_lamb_vega
return (lphot * Unit(str(v.wavelength.unit))).to(self.wavelength_unit)
def _get_filter_in_units_of(self, slamb=None):
w = self.wavelength
if hasUnit(slamb) & hasUnit(w):
return w.to(str(slamb.unit)).value
else:
print("Warning: assuming units are consistent")
return self._wavelength
@set_method_default_units('AA', 'flam',
output_unit='photon*s**-1*cm**-2*AA**-1')
def get_Nphotons(self, slamb, sflux, axis=-1):
"""getNphot the number of photons through the filter
(Ntot / width in the documentation)
getflux() * leff / hc
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux in erg/s/cm2/AA
Returns
-------
N: float
Number of photons of the spectrum within the filter
"""
passb = self.reinterp(slamb)
wave = passb._wavelength
dlambda = np.diff(wave)
# h = 6.626075540e-27 # erg * s
# c = 2.99792458e18 # AA / s
h = Constants.h.to('erg * s').value
c = Constants.c.to('AA/s').value
vals = sflux.value * wave * passb.transmit
vals[~np.isfinite(vals)] = 0.
Nphot = 0.5 * np.sum((vals[1:] + vals[:-1]) * dlambda) / (h * c)
Nphot = Nphot * Unit('photon*s**-1*cm**-2')
return Nphot / passb.width # photons / cm2 / s / A
@property
def Vega_zero_photons(self):
""" Vega number of photons per wavelength unit
.. note::
see `self.get_Nphotons`
"""
with Vega() as v:
return self.get_Nphotons(v.wavelength, v.flux)
@set_method_default_units('AA', 'flam',
output_unit='erg*s**-1*cm**-2*AA**-1')
def get_flux(self, slamb, sflux, axis=-1):
"""getFlux
Integrate the flux within the filter and return the integrated energy
If you consider applying the filter to many spectra, you might want to
consider extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
passb = self.reinterp(slamb)
ifT = passb.transmit
_slamb = _drop_units(slamb)
_sflux = _drop_units(passb._validate_sflux(slamb, sflux))
_w_unit = str(slamb.unit)
_f_unit = str(sflux.unit)
# if the filter is null on that wavelength range flux is then 0
# ind = ifT > 0.
nonzero = np.where(ifT > 0)[0]
if nonzero.size <= 0:
return passb._get_zero_like(sflux)
# avoid calculating many zeros
nonzero_start = max(0, min(nonzero) - 5)
nonzero_end = min(len(ifT), max(nonzero) + 5)
ind = np.zeros(len(ifT), dtype=bool)
ind[nonzero_start:nonzero_end] = True
if True in ind:
try:
_sflux = _sflux[:, ind]
except Exception:
_sflux = _sflux[ind]
# limit integrals to where necessary
if 'photon' in passb.dtype:
a = np.trapz(_slamb[ind] * ifT[ind] * _sflux, _slamb[ind],
axis=axis)
b = np.trapz(_slamb[ind] * ifT[ind], _slamb[ind])
a = a * Unit('*'.join((_w_unit, _f_unit, _w_unit)))
b = b * Unit('*'.join((_w_unit, _w_unit)))
elif 'energy' in passb.dtype:
a = np.trapz(ifT[ind] * _sflux, _slamb[ind], axis=axis)
b = np.trapz(ifT[ind], _slamb[ind])
a = a * Unit('*'.join((_f_unit, _w_unit)))
b = b * Unit(_w_unit)
if (np.isinf(a.value).any() | np.isinf(b.value).any()):
print(self.name, "Warn for inf value")
return a / b
else:
return passb._get_zero_like(_sflux)
def getFlux(self, slamb, sflux, axis=-1):
"""
Integrate the flux within the filter and return the integrated energy
If you consider applying the filter to many spectra, you might want to
consider extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
return self.get_flux(slamb, sflux, axis=axis)
def reinterp(self, lamb):
""" reinterpolate filter onto a different wavelength definition """
_wavelength = self._get_filter_in_units_of(lamb)
_lamb = _drop_units(lamb)
try:
_unit = str(lamb.unit)
except Exception:
_unit = self.wavelength_unit
ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)
return self.__class__(_lamb, ifT, name=self.name, dtype=self.dtype,
unit=_unit)
def __call__(self, slamb, sflux):
return self.applyTo(slamb, sflux)
def apply_transmission(self, slamb, sflux):
"""
Apply filter transmission to a spectrum (with reinterpolation of the
filter)
Parameters
----------
slamb: ndarray
spectrum wavelength definition domain
sflux: ndarray
associated flux
Returns
-------
flux: float
new spectrum values accounting for the filter
"""
_wavelength = self._get_filter_in_units_of(slamb)
_lamb = _drop_units(slamb)
ifT = np.interp(_lamb, _wavelength, self.transmit, left=0., right=0.)
return ifT * sflux
def applyTo(self, slamb, sflux):
""" For compatibility but bad name """
return self.apply_transmission(slamb, sflux)
@classmethod
def from_ascii(cls, fname, dtype='csv', **kwargs):
""" Load filter from ascii file """
lamb = kwargs.pop('lamb', None)
name = kwargs.pop('name', None)
detector = kwargs.pop('detector', 'photon')
unit = kwargs.pop('unit', None)
t = SimpleTable(fname, dtype=dtype, **kwargs)
w = t['WAVELENGTH'].astype(float)
r = t['THROUGHPUT'].astype(float)
# update properties from file header
detector = t.header.get('DETECTOR', detector)
unit = t.header.get('WAVELENGTH_UNIT', unit)
name = t.header.get('NAME', name)
# try from the comments in the header first
if name in (None, 'None', 'none', ''):
name = [k.split()[1]
for k in t.header.get('COMMENT', '').split('\n')
if 'COMPNAME' in k]
name = ''.join(name).replace('"', '').replace("'", '')
# if that did not work try the table header directly
if name in (None, 'None', 'none', ''):
name = t.header['NAME']
_filter = UnitFilter(w, r, name=name, dtype=detector, unit=unit)
# reinterpolate if requested
if lamb is not None:
_filter = _filter.reinterp(lamb)
return _filter
def write_to(self, fname, **kwargs):
""" Export filter to a file
Parameters
----------
fname: str
filename
Uses `SimpleTable.write` parameters
"""
data = self.to_Table()
data.write(fname, **kwargs)
def to_Table(self, **kwargs):
""" Export filter to a SimpleTable object
Parameters
----------
fname: str
filename
Uses `SimpleTable` parameters
"""
data = SimpleTable({'WAVELENGTH': self._wavelength,
'THROUGHPUT': self.transmit})
if self.wavelength_unit is not None:
data.header['WAVELENGTH_UNIT'] = self.wavelength_unit
data.header['DETECTOR'] = self.dtype
data.header['COMPNAME'] = str(self.name)
data.header['NAME'] = str(self.name)
data.set_comment('THROUGHPUT', 'filter throughput definition')
data.set_comment('WAVELENGTH', 'filter wavelength definition')
data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')
return data
def to_dict(self):
""" Return a dictionary of the filter """
data = {'WAVELENGTH': self._wavelength, 'THROUGHPUT': self.transmit}
if self.wavelength_unit is not None:
data['WAVELENGTH_UNIT'] = self.wavelength_unit
data['DETECTOR'] = self.dtype
data['NAME'] = self.name
data['PIVOT'] = self._lpivot
data['CENTRAL'] = self._cl
data['EFFECTIVE'] = _drop_units(self.leff)
data['NORM'] = self.norm
return data
@classmethod
def make_integration_filter(cls, lmin, lmax, name='', dtype='photon',
unit=None):
""" Generate an heavyside filter between lmin and lmax """
dyn = lmax - lmin
try:
unit = str(dyn.unit)
dyn = _drop_units(dyn)
except Exception:
pass
w = np.array([lmin - 0.01 * dyn, lmin, lmax, lmax + 0.01 * dyn])
f = np.array([0., 1., 1., 0.])
return UnitFilter(w, f, name=name, dtype=dtype, unit=unit)
@property
def AB_zero_mag(self):
""" AB magnitude zero point
ABmag = -2.5 * log10(f_nu) - 48.60
= -2.5 * log10(f_lamb) - 2.5 * log10(lpivot ** 2 / c) - 48.60
= -2.5 * log10(f_lamb) - zpts
"""
if self.wavelength_unit is None:
raise AttributeError('Needs wavelength units')
C1 = (Unit(self.wavelength_unit).to('AA') ** 2 /
Constants.c.to('AA/s').value)
c1 = self._lpivot ** 2 * C1
m = 2.5 * np.log10(_drop_units(c1)) + 48.6
return m
@property
def AB_zero_flux(self):
""" AB flux zero point in erg/s/cm2/AA """
return 10 ** (-0.4 * self.AB_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
@property
def AB_zero_Jy(self):
""" AB flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.AB_zero_flux.value
return f * Unit('Jy')
@property
def Vega_zero_mag(self):
""" vega magnitude zero point
vegamag = -2.5 * log10(f_lamb) + 2.5 * log10(f_vega)
vegamag = -2.5 * log10(f_lamb) - zpts
"""
flux = self.Vega_zero_flux.value
if flux > 0:
return -2.5 * np.log10(flux)
else:
return float('nan')
@property
def Vega_zero_flux(self):
""" Vega flux zero point in erg/s/cm2/AA """
with Vega() as v:
f_vega = self.get_flux(v.wavelength, v.flux, axis=-1)
return f_vega
@property
def Vega_zero_Jy(self):
""" Vega flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * (self.lpivot.to('AA').value ** 2 *
self.Vega_zero_flux.to('erg*s**-1*cm**-2*AA**-1').value)
return f * Unit('Jy')
@property
def ST_zero_mag(self):
""" ST magnitude zero point
STmag = -2.5 * log10(f_lamb) -21.1
"""
return 21.1
@property
def ST_zero_flux(self):
""" ST flux zero point in erg/s/cm2/AA """
return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
@property
def ST_zero_Jy(self):
""" ST flux zero point in Jansky (Jy) """
c = 1e-8 * Constants.c.to('m/s').value
f = 1e5 / c * self.lpivot.to('AA').value ** 2 * self.ST_zero_flux.value
return f * Unit('Jy')
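# Minimal usage sketch of the UnitFilter API defined above. The Gaussian
# passband and the flat spectrum are synthetic stand-ins; real passbands
# normally come from one of the libraries defined further down.
def _unit_filter_sketch():  # pragma: no cover - illustrative only
    wave = np.linspace(4000., 6000., 500) * Unit('AA')
    transmit = np.exp(-0.5 * ((wave.value - 5000.) / 150.) ** 2)
    pb = UnitFilter(wave, transmit, name='toy_gaussian', dtype='photon')
    sflux = np.ones(wave.size) * 1e-17 * Unit('erg*s**-1*cm**-2*AA**-1')
    flux = pb.get_flux(wave, sflux)   # flux density through the passband
    mag_ab = -2.5 * np.log10(flux.value) - pb.AB_zero_mag
    return pb.lpivot, flux, mag_ab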
class UncertainFilter(UnitFilter):
""" What could be a filter with uncertainties
Attributes
----------
wavelength: ndarray
wavelength sequence defining the filter transmission curve
mean_: Filter
mean passband transmission
samples_: sequence(Filter)
samples from the uncertain passband transmission model
name: string
name of the passband
dtype: str
detector type, either "photon" or "energy" counter
unit: str
wavelength units
"""
def __init__(self, wavelength, mean_transmit, samples,
name='', dtype='photon', unit=None):
""" Constructor """
self.mean_ = UnitFilter(wavelength, mean_transmit,
name=name, dtype=dtype, unit=unit)
self.samples_ = [UnitFilter(wavelength, transmit_k,
name=name + '_{0:d}'.format(num),
dtype=dtype, unit=unit)
for (num, transmit_k) in enumerate(samples)]
self.name = name
self.dtype = self.mean_.dtype
self.model_ = None
@classmethod
def from_gp_model(cls, model, xprime=None, n_samples=10, **kwargs):
""" Generate a filter object from a sklearn GP model
Parameters
----------
model: sklearn.gaussian_process.GaussianProcessRegressor
model of the passband
xprime: ndarray
wavelength to express the model in addition to the training points
n_samples: int
number of samples to generate from the model.
        **kwargs: dict
UncertainFilter keywords
"""
if xprime is None:
xpred = model.X_train_
else:
xpred = np.unique(np.hstack([_drop_units(xprime),
model.X_train_.ravel()]))
xpred = xpred.reshape(1, -1).T
unit_ = kwargs.pop('unit', None)
if unit_ is None:
unit_ = str(getattr(xprime, 'units', None))
mean_transmit, _ = model.predict(xpred, return_std=True)
samples = model.sample_y(xpred, n_samples=n_samples)
unc_filter = cls(xpred.ravel(),
mean_transmit,
samples.T, unit=unit_, **kwargs)
unc_filter.model_ = model
return unc_filter
def info(self, show_zeropoints=True):
""" display information about the current filter"""
string = self.mean_.info(show_zeropoints)
string = string.replace('Filter object information',
'Filter object mean information only')
return string
def set_dtype(self, dtype):
""" Set the detector type (photon or energy)"""
self.mean_.set_dtype(dtype)
for filter_k in self.samples_:
filter_k.set_dtype(dtype)
self.dtype = self.mean_.dtype
def set_wavelength_unit(self, unit):
""" Set the wavelength units """
self.mean_.set_wavelength_unit(unit)
for filter_k in self.samples_:
filter_k.set_wavelength_unit(unit)
@property
def wavelength(self):
""" Unitwise wavelength definition """
return self.mean_.wavelength
@property
def wavelength_unit(self):
""" Unit wavelength definition """
return self.mean_.wavelength_unit
@property
def _wavelength(self):
""" Unitless wavelength definition """
return self.mean_._wavelength
@property
def transmit(self):
""" Transmission curves """
return self._get_mean_and_samples_attribute('transmit')
def _get_samples_attribute(self, attr, *args, **kwargs):
""" Returns the attribute from all samples """
try:
vals = [getattr(fk, attr)(*args, **kwargs) for fk in self.samples_]
except TypeError:
vals = [getattr(fk, attr) for fk in self.samples_]
try:
unit_ = Unit(str(vals[0].unit))
return np.array([v.value for v in vals]) * unit_
except AttributeError:
return np.array(vals)
def _get_mean_attribute(self, attr, *args, **kwargs):
""" Returns the attribute from the mean passband """
attr = getattr(self.mean_, attr)
try:
return attr(*args, **kwargs)
except TypeError:
return attr
def _get_mean_and_samples_attribute(self, attr, *args, **kwargs):
""" Compute / extract mean and smapled filter attributes
Parameters
----------
attr: str
attribute to get (can be a callable attribute)
args: sequence
any argument of attr
kwargs: dict
any keywords for attr
Returns
-------
mean_: object
value from the mean passband
samples_: sequence(object)
values from each sampled passband
"""
return (self._get_mean_attribute(attr, *args, **kwargs),
self._get_samples_attribute(attr, *args, **kwargs))
@property
def lmax(self):
""" Calculated as the last value with a transmission at least 1% of
maximum transmission """
return self._get_mean_and_samples_attribute('lmax')
@property
def lmin(self):
""" Calculate das the first value with a transmission at least 1% of
maximum transmission """
return self._get_mean_and_samples_attribute('lmin')
@property
def width(self):
""" Effective width
Equivalent to the horizontal size of a rectangle with height equal
        to maximum transmission and with the same area as the one covered by
the filter transmission curve.
W = int(T dlamb) / max(T)
"""
return self._get_mean_and_samples_attribute('width')
@property
def fwhm(self):
""" the difference between the two wavelengths for which filter
transmission is half maximum
        .. note::
This calculation is not exact but rounded to the nearest passband
data points
"""
return self._get_mean_and_samples_attribute('fwhm')
@property
def lpivot(self):
""" Unitwise wavelength definition """
return self._get_mean_and_samples_attribute('lpivot')
@property
def cl(self):
""" Unitwise wavelength definition """
return self._get_mean_and_samples_attribute('cl')
@property
def leff(self):
""" Unitwise Effective wavelength
leff = int (lamb * T * Vega dlamb) / int(T * Vega dlamb)
"""
return self._get_mean_and_samples_attribute('leff')
@property
def lphot(self):
""" Photon distribution based effective wavelength. Defined as
lphot = int(lamb ** 2 * T * Vega dlamb) / int(lamb * T * Vega dlamb)
which we calculate as
lphot = get_flux(lamb * vega) / get_flux(vega)
"""
return self._get_mean_and_samples_attribute('lphot')
def get_Nphotons(self, slamb, sflux, axis=-1):
"""getNphot the number of photons through the filter
(Ntot / width in the documentation)
getflux() * leff / hc
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux in erg/s/cm2/AA
Returns
-------
N: float
Number of photons of the spectrum within the filter
"""
mean, samples = self._get_mean_and_samples_attribute('get_Nphotons',
slamb, sflux,
axis=axis)
return mean, samples
@property
def Vega_zero_photons(self):
""" Vega number of photons per wavelength unit
.. note::
see `self.get_Nphotons`
"""
return self._get_mean_and_samples_attribute('Vega_zero_photons')
def getFlux(self, slamb, sflux, axis=-1):
"""getFlux
Integrate the flux within the filter and return the integrated energy
If you consider applying the filter to many spectra, you might want to
consider extractSEDs.
Parameters
----------
slamb: ndarray(dtype=float, ndim=1)
spectrum wavelength definition domain
sflux: ndarray(dtype=float, ndim=1)
associated flux
Returns
-------
flux: float
Energy of the spectrum within the filter
"""
mean, samples = self._get_mean_and_samples_attribute('getFlux',
slamb, sflux,
axis=axis)
return mean, samples
def reinterp(self, lamb):
""" reinterpolate filter onto a different wavelength definition """
mean, samples = self._get_mean_and_samples_attribute('reinterp')
mean_val = mean(lamb)
samp_val = [sk(mean_val.wavelength) for sk in samples]
samp_transmissions = [sk.transmit for sk in samp_val]
return self.__class__(mean_val.wavelength, mean_val.transmit,
samp_transmissions, name=self.name,
dtype=mean_val.dtype,
unit=mean_val.wavelength_unit)
def apply_transmission(self, slamb, sflux):
"""
Apply filter transmission to a spectrum
(with reinterpolation of the filter)
Parameters
----------
slamb: ndarray
spectrum wavelength definition domain
sflux: ndarray
associated flux
Returns
-------
flux: float
new spectrum values accounting for the filter
"""
mean, samples = self._get_mean_and_samples_attribute('apply_transmission')
mean_val = mean(slamb, sflux)
samp_val = [sk(slamb, sflux) for sk in samples]
return mean_val, samp_val
@property
def AB_zero_mag(self):
""" AB magnitude zero point
ABmag = -2.5 * log10(f_nu) - 48.60
= -2.5 * log10(f_lamb) - 2.5 * log10(lpivot ** 2 / c) - 48.60
= -2.5 * log10(f_lamb) - zpts
"""
return self._get_mean_and_samples_attribute('AB_zero_mag')
@property
def AB_zero_flux(self):
""" AB flux zero point in erg/s/cm2/AA """
return self._get_mean_and_samples_attribute('AB_zero_flux')
@property
def AB_zero_Jy(self):
""" AB flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('AB_zero_Jy')
@property
def Vega_zero_mag(self):
""" Vega magnitude zero point
Vegamag = -2.5 * log10(f_lamb) + 2.5 * log10(f_vega)
Vegamag = -2.5 * log10(f_lamb) - zpts
"""
return self._get_mean_and_samples_attribute('Vega_zero_mag')
@property
def Vega_zero_flux(self):
""" Vega flux zero point in erg/s/cm2/AA """
return self._get_mean_and_samples_attribute('Vega_zero_flux')
@property
def Vega_zero_Jy(self):
""" Vega flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('Vega_zero_Jy')
@property
def ST_zero_mag(self):
""" ST magnitude zero point
STmag = -2.5 * log10(f_lamb) -21.1
"""
return 21.1
@property
def ST_zero_flux(self):
""" ST flux zero point in erg/s/cm2/AA """
        return 10 ** (-0.4 * self.ST_zero_mag) * Unit('erg*s**-1*cm**-2*AA**-1')
@property
def ST_zero_Jy(self):
""" ST flux zero point in Jansky (Jy) """
return self._get_mean_and_samples_attribute('ST_zero_Jy')
def to_Table(self, **kwargs):
""" Export filter to a SimpleTable object
Parameters
----------
fname: str
filename
Uses `SimpleTable` parameters
"""
mean_transmit, transmit_ = self.transmit
data_ = {'WAVELENGTH': self._wavelength,
'THROUGHPUT': mean_transmit}
for num, filterk in enumerate(transmit_, 1):
data_['THROUGHPUT_{0:d}'.format(num)] = filterk
data = SimpleTable(data_)
if self.wavelength_unit is not None:
data.header['WAVELENGTH_UNIT'] = self.wavelength_unit
data.header['DETECTOR'] = self.dtype
data.header['COMPNAME'] = self.name
data.header['NAME'] = self.name
data.set_comment('THROUGHPUT', 'filter throughput definition')
data.set_comment('WAVELENGTH', 'filter wavelength definition')
for num in range(1, len(transmit_) + 1):
data.set_comment('THROUGHPUT_{0:d}'.format(num),
'filter throughput sample')
data.set_comment('WAVELENGTH', self.wavelength_unit or 'AA')
return data
@classmethod
def from_ascii(cls, fname, dtype='csv', **kwargs):
""" Load filter from ascii file """
lamb = kwargs.pop('lamb', None)
name = kwargs.pop('name', None)
detector = kwargs.pop('detector', 'photon')
unit_ = kwargs.pop('unit', None)
if not isinstance(fname, SimpleTable):
t = SimpleTable(fname, dtype=dtype, **kwargs)
else:
t = fname
w = t['WAVELENGTH'].astype(float)
r = t['THROUGHPUT'].astype(float)
keys = [k for k in t.keys() if 'THROUGHPUT_' in k]
# update properties from file header
detector = t.header.get('DETECTOR', detector)
unit_ = t.header.get('WAVELENGTH_UNIT', unit_)
# try from the comments in the header first
if name in (None, 'None', 'none', ''):
name = [k.split()[1]
for k in t.header.get('COMMENT', '').split('\n')
if 'COMPNAME' in k]
name = ''.join(name).replace('"', '').replace("'", '')
# if that did not work try the table header directly
if name in (None, 'None', 'none', ''):
name = t.header['NAME']
if len(keys) > 0:
samp = np.array([t[key] for key in keys])
_filter = cls(w, r, samp, name=name, dtype=detector, unit=unit_)
else:
_filter = UnitFilter(w, r, name=name, dtype=detector, unit=unit_)
# reinterpolate if requested
if lamb is not None:
_filter = _filter.reinterp(lamb)
return _filter
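# Minimal sketch of building an UncertainFilter directly from a mean curve and
# perturbed samples (e.g. draws from a fitted passband model). All numbers are
# synthetic and only exercise the (mean, samples) return convention.
def _uncertain_filter_sketch():  # pragma: no cover - illustrative only
    wave = np.linspace(4000., 6000., 500)
    mean_t = np.exp(-0.5 * ((wave - 5000.) / 150.) ** 2)
    rng = np.random.RandomState(0)
    samples = [np.clip(mean_t + 0.02 * rng.randn(wave.size), 0., None)
               for _ in range(10)]
    pb = UncertainFilter(wave, mean_t, samples, name='toy_gaussian', unit='AA')
    lpivot_mean, lpivot_samples = pb.lpivot   # properties return (mean, samples)
    return lpivot_mean, lpivot_samples.std()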
class UnitLibrary(object):
""" Common grounds for filter libraries """
def __init__(self, source=__default__, *args, **kwargs):
""" Construct the library """
self.source = None
def __repr__(self):
msg = "Filter Library: {0}\n{1:s}"
return msg.format(self.source, object.__repr__(self))
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *exc_info):
""" end context """
return False
def __len__(self):
""" Size of the library """
return len(self.content)
def to_csv(self, directory='./', progress=True, **kwargs):
""" Export each filter into a csv file with its own name
Parameters
----------
directory: str
directory to write into
progress: bool
show progress if set
"""
from .helpers import progress_enumerate
try:
os.stat(directory)
except Exception:
os.mkdir(directory)
with self as s:
for _, k in progress_enumerate(s.content, desc='export',
show_progress=progress):
f = s[k]
if f.wavelength_unit is None:
f.wavelength_unit = 'AA'
f.write_to("{0:s}/{1:s}.csv".format(directory, f.name).lower(),
fmt="%.6f", **kwargs)
def to_hdf(self, fname='filters.hd5', progress=True, **kwargs):
""" Export each filter into a csv file with its own name
Parameters
----------
        fname: str
            output HDF5 file to write into
progress: bool
show progress if set
"""
from .helpers import progress_enumerate
with self as s:
for _, k in progress_enumerate(s.content, desc='export',
show_progress=progress):
f = s[k]
if f.wavelength_unit is None:
f.wavelength_unit = 'AA'
f.write_to("{0:s}".format(fname),
tablename='/filters/{0}'.format(f.name),
createparents=True, append=True, silent=True,
**kwargs)
@classmethod
def from_hd5(cls, filename, **kwargs):
return UnitHDF_Library(filename, **kwargs)
@classmethod
def from_ascii(cls, filename, **kwargs):
return UnitAscii_Library(filename, **kwargs)
@property
def content(self):
""" Get the content list """
return self.get_library_content()
def __getitem__(self, name):
""" Make this object like a dictionary and load one or multiple filters
"""
with self as s:
try:
f = s._load_filter(name)
except TypeError:
f = [s._load_filter(k) for k in name]
return f
def _load_filter(self, *args, **kwargs):
""" Load a given filter from the library """
raise NotImplementedError
def get_library_content(self):
""" get the content of the library """
raise NotImplementedError
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library """
raise NotImplementedError
def add_filter(self, f):
""" add a filter to the library """
raise NotImplementedError
def find(self, name, case_sensitive=True):
r = []
        if not case_sensitive:
_n = name.lower()
for k in self.get_library_content():
if _n in k.lower():
r.append(k)
else:
for k in self.content:
if name in k:
r.append(k)
return r
class UnitAscii_Library(UnitLibrary):
""" Interface one or multiple directory or many files as a filter library
    >>> lib = UnitAscii_Library(['ground', 'hst', 'myfilter.csv'])
"""
def __init__(self, source):
self.source = source
def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):
""" Load a given filter from the library """
try:
fil = UnitFilter.from_ascii(fname, *args, **kwargs)
except Exception:
content = self.content
r = [k for k in content if fname in k]
if len(r) <= 0: # try all lower for filenames (ascii convention)
r = [k for k in content if fname.lower() in k]
if len(r) > 1:
print("auto correction found multiple choices")
print(r)
raise ValueError('Refine name to one of {0}'.format(r))
elif len(r) <= 0:
raise ValueError('Cannot find filter {0}'.format(fname))
else:
fil = UnitFilter.from_ascii(r[0], *args, **kwargs)
if (interp is True) and (lamb is not None):
return fil.reinterp(lamb)
else:
return fil
def get_library_content(self):
""" get the content of the library """
from glob import glob
try:
os.path.isdir(self.source)
lst = glob(self.source + '/*')
except TypeError:
lst = self.source
dircheck = True
while dircheck is True:
dircheck = False
newlst = []
for entry in lst:
if os.path.isdir(entry):
newlst.extend(glob(entry + '/*'))
dircheck = True
else:
newlst.append(entry)
lst = newlst
return lst
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library """
return [self._load_filter(k, interp=interp, lamb=lamb)
for k in self.content]
def load_filters(self, names, interp=True, lamb=None, filterLib=None):
""" load a limited set of filters
Parameters
----------
names: list[str]
normalized names according to filtersLib
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
filterLib: path
path to the filter library hd5 file
Returns
-------
filters: list[filter]
list of filter objects
"""
filters = [self._load_filter(fname, interp=interp, lamb=lamb)
for fname in names]
return(filters)
def add_filters(self, filter_object, fmt="%.6f", **kwargs):
""" Add a filter to the library permanently
Parameters
----------
filter_object: Filter object
filter to add
"""
if not isinstance(filter_object, UnitFilter):
msg = "Argument of type Filter expected. Got type {0}"
raise TypeError(msg.format(type(filter_object)))
if filter_object.wavelength_unit is None:
msg = "Filter wavelength must have units for storage."
raise AttributeError(msg)
fname = "{0:s}/{1:s}.csv".format(self.source, filter_object.name)
filter_object.write_to(fname.lower(),
fmt=fmt, **kwargs)
class UnitHDF_Library(UnitLibrary):
""" Storage based on HDF """
def __init__(self, source=__default__, mode='r'):
self.source = source
self.hdf = None
self.mode = mode
self._in_context = 0
def __enter__(self):
""" Enter context """
if self.hdf is None:
self.hdf = tables.open_file(self.source, self.mode)
self._in_context += 1
return self
def __exit__(self, *exc_info):
""" end context """
if (self.hdf is not None) and (self._in_context < 2) :
self.hdf.close()
self.hdf = None
self._in_context -= 1
return False
def _load_filter(self, fname, interp=True, lamb=None):
""" Load a given filter from the library
Parameters
----------
fname: str
normalized names according to filtersLib
interp: bool, optional
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
integrationFilter: bool, optional
            set True for special integration filters such as Qion or E_uv
if set, lamb should be given
Returns
-------
filter: Filter instance
filter object
"""
ftab = self.hdf
if hasattr(fname, 'decode'):
fnode = ftab.get_node('/filters/' + fname.decode('utf8'))
else:
fnode = ftab.get_node('/filters/' + fname)
flamb = fnode[:]['WAVELENGTH']
transmit = fnode[:]['THROUGHPUT']
dtype = 'photon'
unit = None
attrs = fnode.attrs
if 'DETECTOR' in attrs:
dtype = attrs['DETECTOR']
if 'WAVELENGTH_UNIT' in attrs:
unit = attrs['WAVELENGTH_UNIT']
fil = UnitFilter(flamb, transmit, name=fnode.name,
dtype=dtype, unit=unit)
if interp & (lamb is not None):
fil = fil.reinterp(lamb)
return fil
def get_library_content(self):
""" get the content of the library """
with self as s:
try:
filters = s.hdf.root.content.cols.TABLENAME[:]
except Exception:
filters = list(s.hdf.root.filters._v_children.keys())
if hasattr(filters[0], 'decode'):
filters = [k.decode('utf8') for k in filters]
return(filters)
def load_all_filters(self, interp=True, lamb=None):
""" load all filters from the library
Parameters
----------
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
Returns
-------
filters: list[filter]
list of filter objects
"""
with self as s:
filters = [s._load_filter(fname, interp=interp, lamb=lamb)
for fname in s.content]
return(filters)
def load_filters(self, names, interp=True, lamb=None, filterLib=None):
""" load a limited set of filters
Parameters
----------
names: list[str]
normalized names according to filtersLib
interp: bool
reinterpolate the filters over given lambda points
lamb: ndarray[float, ndim=1]
desired wavelength definition of the filter
filterLib: path
path to the filter library hd5 file
Returns
-------
filters: list[filter]
list of filter objects
"""
with self as s:
filters = [s._load_filter(fname, interp=interp, lamb=lamb)
for fname in names]
return(filters)
def add_filter(self, f, **kwargs):
""" Add a filter to the library permanently
Parameters
----------
f: Filter object
filter to add
"""
if not isinstance(f, UnitFilter):
msg = "Argument of type Filter expected. Got type {0}"
raise TypeError(msg.format(type(f)))
if f.wavelength_unit is None:
msg = "Filter wavelength must have units for storage."
raise AttributeError(msg)
append = kwargs.pop('append', True)
f.write_to("{0:s}".format(self.source),
tablename='/filters/{0}'.format(f.name),
createparents=True, append=append,
**kwargs)
def get_library(fname=__default__, **kwargs):
""" Finds the appropriate class to load the library """
if os.path.isfile(fname):
return UnitHDF_Library(fname, **kwargs)
else:
return UnitAscii_Library(fname, **kwargs)
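# Minimal sketch of typical library usage. The 'hst' search string is only an
# example and may match nothing depending on the local library content.
def _library_usage_sketch():  # pragma: no cover - illustrative only
    lib = get_library()                  # HDF5 or ascii library, auto-detected
    names = lib.find('hst', case_sensitive=False)
    if names:
        pb = lib[names[0]]               # load a single filter by name
        pb.info(show_zeropoints=True)
    return names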
def _reduce_resolution(wi, fi, fwhm0=0.55, sigma_floor=0.2):
""" Adapt the resolution of the spectra to match the lick definitions
    Lick definitions have different resolution elements as a function
    of wavelength. These definitions are hard-coded in this function.
    Parameters
    ----------
wi: ndarray (n, )
wavelength definition
fi: ndarray (nspec, n) or (n, )
spectra to convert
fwhm0: float
initial broadening in the spectra `fi`
sigma_floor: float
minimal dispersion to consider
Returns
-------
flux_red: ndarray (nspec, n) or (n, )
reduced spectra
"""
# all in AA
w_lick_res = (4000., 4400., 4900., 5400., 6000.)
lick_res = (11.5, 9.2, 8.4, 8.4, 9.8) # FWHM in AA
w = np.asarray(wi)
flux = np.atleast_2d(fi)
# Linear interpolation of lick_res over w
# numpy interp does constant instead of extrapolation
# res = np.interp(w, w_lick_res, lick_res)
# spline order: 1 linear, 2 quadratic, 3 cubic ...
from scipy.interpolate import InterpolatedUnivariateSpline
res = InterpolatedUnivariateSpline(w_lick_res, lick_res, k=1)(w)
# Compute width from fwhm
const = 2. * np.sqrt(2. * np.log(2)) # conversion fwhm --> sigma
lick_sigma = np.sqrt((res ** 2 - fwhm0 ** 2)) / const
# Convolution by g=1/sqrt(2*pi*sigma^2) * exp(-r^2/(2*sigma^2))
flux_red = np.zeros(flux.shape, dtype=flux.dtype)
for i, sigma in enumerate(lick_sigma):
maxsigma = 3. * sigma
# sampling floor: min (0.2, sigma * 0.1)
delta = min(sigma_floor, sigma * 0.1)
delta_wj = np.arange(-maxsigma, + maxsigma, delta)
wj = delta_wj + w[i]
for k, fk in enumerate(flux):
fluxj = np.interp(wj, w, fk, left=0., right=0.)
flux_red[k, i] = np.sum(fluxj * delta *
np.exp(-0.5 * (delta_wj / sigma) ** 2))
flux_red /= lick_sigma * const
return flux_red.reshape(np.shape(fi))
@set_method_default_units('AA', 'flam', output_unit='flam')
def reduce_resolution(wi, fi, fwhm0=0.55 * Unit('AA'),
sigma_floor=0.2 * Unit('AA')):
""" Adapt the resolution of the spectra to match the lick definitions
    Lick definitions have different resolution elements as a function
    of wavelength. These definitions are hard-coded in this function.
    Parameters
    ----------
wi: ndarray (n, )
wavelength definition
fi: ndarray (nspec, n) or (n, )
spectra to convert
fwhm0: float
initial broadening in the spectra `fi`
sigma_floor: float
minimal dispersion to consider
Returns
-------
flux_red: ndarray (nspec, n) or (n, )
reduced spectra
"""
flux_red = _reduce_resolution(wi.value, fi.value,
fwhm0.to('AA').value,
sigma_floor.to('AA').value)
return flux_red * Unit('flam')
class UnitLickIndex(object):
""" Define a Lick Index similarily to a Filter object """
def __init__(self, name, lick, unit='AA'):
""" Constructor
Parameters
----------
name: str
name of the index
lick: dict
expecting 'blue', 'red', 'band', and 'unit' definitions
`blue` and `red` are used to continuum normalize the spectra
`band` covers the index itself. `unit` gives the index measurement
units, either magnitudes (mag) or equivalent width (ew)
unit: str
wavelength unit of the intervals
"""
self.name = name
self._lick = lick
self.wavelength_unit = unit
def to_dict(self):
""" return a dictionary of the current index """
d = {}
d.update(**self._lick)
return d
def _get_wavelength_attrs_with_units(self, attrname, units='AA'):
""" return the unitwise definition corresponding to attrname """
attr = self._lick[attrname]
if self.wavelength_unit is not None:
if units is None:
return attr * Unit(self.wavelength_unit)
else:
return (attr * Unit(self.wavelength_unit)).to(units)
else:
return attr
@property
def band(self):
""" Unitwise band definition """
return self._get_wavelength_attrs_with_units('band')
@property
def blue(self):
""" Unitwise band definition """
return self._get_wavelength_attrs_with_units('blue')
@property
def red(self):
""" Unitwise band definition """
return self._get_wavelength_attrs_with_units('red')
@property
def index_unit(self):
return self._lick['unit']
def __repr__(self):
txt = """LickIndex ({0}), {1}"""
return txt.format(self.name, object.__repr__(self))
def info(self):
""" display information about the current Index"""
txt = """Lick Index {s.name}
wavelength units: {s.wavelength_unit}
Index Band: {s.band}
Blue continuum band: {s.blue}
Red continuum band: {s.red}
Measurement unit: {s.index_unit}""".format(s=self)
print(txt)
def __call__(self, *args, **kwargs):
""" compute spectral index after continuum subtraction
Parameters
----------
w: ndarray (nw, )
array of wavelengths in AA
flux: ndarray (N, nw)
array of flux values for different spectra in the series
degree: int (default 1)
degree of the polynomial fit to the continuum
Returns
-------
ew: ndarray (N,)
equivalent width or magnitude array
"""
return self.get(*args, **kwargs)
def _get(self, wave, flux, **kwargs):
""" compute spectral index after continuum subtraction
Parameters
----------
w: ndarray (nw, )
array of wavelengths in AA
flux: ndarray (N, nw)
array of flux values for different spectra in the series
degree: int (default 1)
degree of the polynomial fit to the continuum
nocheck: bool
set to silently pass on spectral domain mismatch.
otherwise raises an error when index is not covered
Returns
-------
ew: ndarray (N,)
equivalent width or magnitude array
Raises
------
ValueError: when the spectral coverage wave does not cover the index
range
"""
if hasUnit(wave):
_w = wave.to('AA').value
else:
print("Warning: assuming units are in Angstroms")
_w = _drop_units(wave)
_f = _drop_units(flux)
blue = self._get_wavelength_attrs_with_units('blue').value
red = self._get_wavelength_attrs_with_units('red').value
band = self._get_wavelength_attrs_with_units('band').value
nocheck = kwargs.pop('nocheck', False)
not_covered = (blue[0] < _w[0]) | (red[-1] > _w[-1])
if (not_covered):
if (not nocheck):
raise ValueError("Spectrum does not cover this index.")
else:
return np.zeros(_f.shape[0]) * float('nan')
else:
return self._get_indice(_w, _f, blue, red, band, self.index_unit,
**kwargs)
@classmethod
def _get_indice(cls, w, flux, blue, red, band=None, unit='ew', degree=1,
**kwargs):
""" compute spectral index after continuum subtraction
Parameters
----------
w: ndarray (nw, )
array of wavelengths in AA
flux: ndarray (N, nw)
array of flux values for different spectra in the series
blue: tuple(2)
selection for blue continuum estimate
red: tuple(2)
selection for red continuum estimate
band: tuple(2), optional
select region in this band only.
default is band = (min(blue), max(red))
unit: str
            `ew` or `mag`, whether to compute equivalent width or magnitude
degree: int (default 1)
degree of the polynomial fit to the continuum
Returns
-------
ew: ndarray (N,)
equivalent width array
"""
wi, fi = cls.continuum_normalized_region_around_line(w, flux, blue,
red, band=band,
degree=degree)
if unit in (0, 'ew', 'EW'):
return np.trapz(1. - fi, wi, axis=-1)
else:
m = np.trapz(fi, wi, axis=-1)
m = -2.5 * np.log10(m / np.ptp(wi))
return m
@classmethod
def continuum_normalized_region_around_line(cls, wi, fi, blue, red,
band=None, degree=1):
"""
cut out and normalize flux around a line
Parameters
----------
wi: ndarray (nw, )
array of wavelengths in AA
fi: ndarray (N, nw)
array of flux values for different spectra in the series
blue: tuple(2)
selection for blue continuum estimate
red: tuple(2)
selection for red continuum estimate
band: tuple(2), optional
select region in this band only.
default is band = (min(blue), max(red))
degree: int
degree of the polynomial fit to the continuum
        Returns
-------
wnew: ndarray (nw1, )
wavelength of the selection in AA
f: ndarray (N, len(wnew))
normalized flux in the selection region
.. example::
            # index of CaII
            # wavelengths are always assumed to be in AA
            w, f = region_around_line(
                wavelength, flux, [3925, 3930], [3938, 3945]
)
"""
w = np.asarray(wi)
flux = np.atleast_2d(fi)
# index is true in the region where we fit the polynomial
indcont = (((w >= blue[0]) & (w <= blue[1])) |
((w >= red[0]) & (w <= red[1]))
)
# index of the region we want to return
if band is None:
band = blue[0], red[1]
indrange = (w > band[0]) & (w < band[1])
wnew = w[indrange]
wcont = w[indcont]
# make a flux array of shape
# (number of spectra, number of points in indrange)
f = np.zeros((flux.shape[0], indrange.sum()))
for i in range(flux.shape[0]):
            # fit a polynomial of the requested degree to the continuum region
linecoeff = np.polyfit(wcont, flux[i, indcont], degree)
# divide the flux by the polynomial and put the result in our new
# flux array
f[i, :] = flux[i, indrange] / np.polyval(linecoeff, wnew)
return wnew, np.squeeze(f)
@set_method_default_units('AA', 'flam')
def get(self, wave, flux, **kwargs):
""" compute spectral index after continuum subtraction
Parameters
----------
w: ndarray (nw, )
array of wavelengths in AA
flux: ndarray (N, nw)
array of flux values for different spectra in the series
degree: int (default 1)
degree of the polynomial fit to the continuum
nocheck: bool
set to silently pass on spectral domain mismatch.
otherwise raises an error when index is not covered
Returns
-------
ew: ndarray (N,)
equivalent width or magnitude array
Raises
------
ValueError: when the spectral coverage wave does not cover the index
range
"""
return self._get(wave, flux.to('flam').value, **kwargs)
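# Minimal sketch of measuring a Lick-style index with the class above. The
# band/continuum limits are made up for illustration and the 'flam' flux alias
# used throughout this module is assumed to be registered; real definitions
# come from the UnitLickLibrary below.
def _lick_index_sketch():  # pragma: no cover - illustrative only
    definition = dict(band=(5160.0, 5192.0), blue=(5142.0, 5161.0),
                      red=(5191.0, 5206.0), unit='ew')
    index = UnitLickIndex('toy_index', definition, unit='AA')
    wave = np.linspace(5100., 5250., 600) * Unit('AA')
    flux = np.ones((1, wave.size)) * Unit('flam')
    return index.get(wave, flux)          # ~0 EW for a flat spectrum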
class UnitLickLibrary(object):
""" Collection of Lick indices """
def __init__(self, fname=__default_lick__, comment='#'):
self.source = fname
data, hdr = self._read_lick_list(fname, comment)
self._content = data
self._hdr = hdr
@property
def description(self):
""" any comment in the input file """
return self._hdr
@classmethod
def _read_lick_list(cls, fname=__default_lick__, comment='#'):
""" read the list of lick indices
Parameters
----------
fname: str
file containing the indices' definitions
comment: str
character indicating comment in the file
Returns
-------
data: dict
dictionary of indices
name: (band, blue, red, unit)
"""
with open(fname, 'r') as f:
data = {}
hdr = []
for line in f:
if line[0] != comment:
_line = line.split()
attr = dict(
band=(float(_line[1]), float(_line[2])),
blue=(float(_line[3]), float(_line[4])),
red=(float(_line[5]), float(_line[6])),
unit='mag' if int(_line[7]) > 0 else 'ew',
)
name = _line[8]
data[name] = attr
else:
hdr.append(line[1:-1])
return data, hdr
def __repr__(self):
return "Lick Index Library: {0}\n{1:s}".format(self.source,
object.__repr__(self))
def __enter__(self):
""" Enter context """
return self
def __exit__(self, *exc_info):
""" end context """
return False
def __len__(self):
""" Size of the library """
return len(self.content)
def get_library_content(self):
return list(self._content.keys())
def __getitem__(self, name):
""" Make this object like a dictionary and load one or multiple filters
"""
with self as s:
try:
f = s._load_filter(name)
except TypeError:
f = [s._load_filter(k) for k in name]
return f
def _load_filter(self, fname, **kwargs):
""" Load a given filter from the library """
with self as current_lib:
return UnitLickIndex(fname, current_lib._content[fname])
@property
def content(self):
return self.get_library_content()
def find(self, name, case_sensitive=True):
r = []
if not case_sensitive:
_n = name.lower()
for k in self.get_library_content():
if _n in k.lower():
r.append(k)
else:
for k in self.content:
if name in k:
r.append(k)
return r
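# Minimal sketch of the UnitLickLibrary interface using the bundled definition
# file. The 'Mg' search string is only an example and may match nothing if the
# local licks.dat differs.
def _lick_library_sketch():  # pragma: no cover - illustrative only
    lib = UnitLickLibrary()               # reads __default_lick__
    hits = lib.find('Mg', case_sensitive=False)
    if hits:
        index = lib[hits[0]]
        index.info()
    return hits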
| mit |
jmargeta/scikit-learn | examples/svm/plot_iris.py | 4 | 1951 | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on the iris dataset. It
will plot the decision surface for four different SVM classifiers.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
pl.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('off')
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.title(titles[i])
pl.show()
| bsd-3-clause |
wuxue/altanalyze | ResultsExport_module.py | 1 | 32231 | ###ResultsExport_module
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - [email protected]
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import os.path
import statistics
import unique
import export
dirfile = unique
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
#add in code to prevent folder names from being included
dir_list2 = []
for entry in dir_list:
#if entry[-4:] == ".txt" or entry[-4:] == ".all" or entry[-5:] == ".data" or entry[-3:] == ".fa":
dir_list2.append(entry)
return dir_list2
def returnDirectories(sub_dir):
dir=os.path.dirname(dirfile.__file__)
dir_list = os.listdir(dir + sub_dir)
###Below code used to prevent FILE names from being included
dir_list2 = []
for entry in dir_list:
if "." not in entry: dir_list2.append(entry)
return dir_list2
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
class GrabFiles:
def setdirectory(self,value): self.data = value
def display(self): print self.data
def searchdirectory(self,search_term):
#self is an instance while self.data is the value of the instance
files = getDirectoryFiles(self.data,search_term)
if len(files)<1: print 'files not found'
return files
def returndirectory(self):
dir_list = getAllDirectoryFiles(self.data)
return dir_list
def getAllDirectoryFiles(import_dir):
all_files = []
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
all_files.append(data_dir)
return all_files
def getDirectoryFiles(import_dir,search_term):
dir_list = read_directory(import_dir) #send a sub_directory to a function to identify all files in a directory
matches=[]
for data in dir_list: #loop through each file in the directory to output results
data_dir = import_dir[1:]+'/'+data
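        ###Note: search_term acts as an exclusion filter -- only paths that do NOT contain it are returned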
if search_term not in data_dir: matches.append(data_dir)
return matches
############ Result Export Functions #############
def outputSummaryResults(summary_results_db,name,analysis_method,root_dir):
#summary_results_db[dataset_name] = udI,udI-up_diff,ddI,ddI-down_diff,udI_mx,udI_mx-mx_diff,up_dI_genes,down_gene, annotation_list
annotation_db = {}
for dataset in summary_results_db:
for entry in summary_results_db[dataset][-1]:
annotation = entry[0]
count = entry[1]
if 'AA:' not in annotation:
try: annotation_db[annotation].append((dataset,count))
except KeyError: annotation_db[annotation] = [(dataset,count)]
annotation_ls = []
for annotation in annotation_db: annotation_ls.append(annotation)
annotation_ls.sort()
annotation_db2={}
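    ###Build dataset -> [(annotation, count)] lists, padding missing annotations with zero counts
    ###so every dataset reports a value for every annotation column in the summary output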
for annotation in annotation_ls:
for dataset in summary_results_db:
y=0
for entry in summary_results_db[dataset][-1]:
annotation2 = entry[0]
count = entry[1]
if annotation2 == annotation:
y=1; new_count = count
if y == 1:
try: annotation_db2[dataset].append((annotation,new_count))
except KeyError: annotation_db2[dataset] = [(annotation,new_count)]
else:
try: annotation_db2[dataset].append((annotation,0))
except KeyError: annotation_db2[dataset] = [(annotation,0)]
summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-summary-results'+name+'.txt'
fn=filepath(summary_output)
data = export.createExportFile(summary_output,'AltResults/AlternativeOutput')
if analysis_method == 'splicing-index' or analysis_method == 'FIRMA':
event_type1 = 'inclusion-events'; event_type2 = 'exclusion-events'; event_type3 = 'alternative-exons'
else:
        event_type1 = 'inclusion-events'; event_type2 = 'exclusion-events'; event_type3 = 'mutually-exclusive-events'
title = 'Dataset-name' +'\t'+ event_type1+'\t'+event_type2 +'\t'+ event_type3 +'\t'+ 'up-deltaI-genes' +'\t'+ 'down-deltaI-genes' +'\t'+ 'total-'+analysis_method+'-genes'
title = title +'\t' + 'upregulated_genes' +'\t'+ 'downregulated_genes' +'\t'+ analysis_method+'-genes-differentially-exp'+'\t'+ 'RNA_processing/binding-factors-upregulated' +'\t'+ 'RNA_processing/binding-factors-downregulated' +'\t'+ analysis_method+'_RNA_processing/binding-factors'
title = title +'\t'+ 'avg-downregulated-peptide-length' +'\t'+ 'std-downregulated-peptide-length' +'\t'+ 'avg-upregulated-peptide-length' +'\t'+ 'std-upregulated-peptide-length' +'\t'+ 'ttest-peptide-length' +'\t'+ 'median-peptide-length-fold-change'
for entry in annotation_ls: title = title +'\t'+ entry
data.write(title+'\n')
for dataset in summary_results_db:
values = dataset
for entry in summary_results_db[dataset][0:-1]: values = values +'\t'+ str(entry)
if dataset in annotation_db2:
for entry in annotation_db2[dataset]: values = values +'\t'+ str(entry[1])
data.write(values+'\n')
data.close()
def compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,analyzing_genes,analysis_method,array_type,root_dir):
aspire_gene_db = {}; aspire_event_db = {}; event_annotation_db = {}; dataset_name_list = []
#annotate_db[affygene] = name, symbol,ll_id,splicing_annotation
include_all_other_genes = 'yes'
for filename in aspire_output_list:
x = 0
fn=filepath(filename)
if '\\' in filename: names = string.split(filename,'\\') #grab file name
else: names = string.split(filename,'/')
try: names = string.split(names[-1],'-'+analysis_method)
except ValueError: print names;kill
name = names[0]
dataset_name_list.append(name)
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
data = string.split(data,'\t') #remove endline
y = 0
if x == 0: x=1
else:
if analyzing_genes == 'no':
if (array_type == 'exon' or array_type == 'gene') and analysis_method in filename:
lowest_pvalue = float(data[8]);
try: si_p = float(data[20])
except Exception: si_p = 1
try: midas_p = float(data[9])
except ValueError: midas_p = 0
#print si_p,midas_p;kill
#if lowest_pvalue < 0.05:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[2]; description = data[3]
exon_set1 = data[4]; exon_set2 = ''
event_call = data[27]; functional_attribute = data[14]
uniprot_attribute = data[15]; gene_expression_change = data[22]
dI = dI*(-1)
elif analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[2]; description = data[3]
exon_set1 = data[4]+'('+data[8]+')'; exon_set2 = data[5]+'('+data[10]+')'
event_call = data[27]; functional_attribute = data[14]
uniprot_attribute = data[15]; gene_expression_change = data[22]
if analysis_method == 'linearregres' or analysis_method == 'ASPIRE':
functional_attribute = data[19]; uniprot_attribute = data[20]
#print exon_set1, exon_set2, data[:5];kill
else:
if (array_type == 'exon' or array_type == 'gene') and analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[3]; description = data[5]
dI_direction = data[6]; locus_link = affygene
exon_set1 = ''; exon_set2 = ''
event_call = data[-4]; functional_attribute = data[-9]
uniprot_attribute = data[-8]; gene_expression_change = data[-5]
if dI_direction == 'upregulated': dI = dI*(-1)
elif analysis_method in filename:
y = 1
affygene = data[0]; dI = float(data[1])
symbol = data[3]; description = data[5]
dI_direction = data[6]; locus_link = data[4]
exon_set1 = ''; exon_set2 = ''
event_call = data[-4]; functional_attribute = data[-9]
uniprot_attribute = data[-8]; gene_expression_change = data[-5]
if dI_direction == 'downregulated': dI = dI*(-1)
#print affygene,data[-10:];kill
if y == 1:
data_tuple = [name,functional_attribute,uniprot_attribute,gene_expression_change,dI]
try: aspire_event_db[affygene,exon_set1,exon_set2].append(data_tuple)
except KeyError: aspire_event_db[affygene,exon_set1,exon_set2] = [data_tuple]
event_annotation_db[affygene,exon_set1,exon_set2] = event_call,symbol,description
aspire_event_db2 = {}; splice_gene_db = {}; dataset_name_list.sort()
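    ###For each dataset (sorted) record that event's stats; datasets lacking the event get a blank
    ###placeholder tuple ('','','',0) so the per-dataset columns stay aligned in the comparison output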
for name in dataset_name_list:
for key in event_annotation_db:
###record all genes in the event_annotation_db
splice_gene_db[key[0]] = key[0]
if key in aspire_event_db:
x = 0
for entry in aspire_event_db[key]:
if entry[0] == name:
x = 1
dI = entry[1],entry[2],entry[3],entry[4]
try: aspire_event_db2[key].append(dI)
except KeyError: aspire_event_db2[key] = [dI]
if x ==0:
try: aspire_event_db2[key].append(('','','',0))
except KeyError: aspire_event_db2[key] = [('','','',0)]
else:
try: aspire_event_db2[key].append(('','','',0))
except KeyError: aspire_event_db2[key] = [('','','',0)]
for key in aspire_event_db2:
dataset_size = len(aspire_event_db2[key])
break
###Add all other Affygene's
temp=[]; x = 0
while x < dataset_size:
temp.append(('','','',0))
x +=1
for affygene in annotate_db:
if affygene not in splice_gene_db:
aspire_event_db2[affygene,'',''] = temp
if include_all_other_genes == 'yes': analysis_method+= '-all-genes'
if analyzing_genes == 'no': summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-comparisons-events.txt'
else: summary_output = root_dir+'AltResults/AlternativeOutput/'+analysis_method+'-'+ 'GENE-' +'comparisons-events.txt'
fn=filepath(summary_output)
data = open(fn,'w')
title = 'GeneID' +'\t'+ 'symbol'+'\t'+'description' +'\t'+ 'exon_set1' +'\t'+ 'exon_set2' +'\t'+ 'event_call' +'\t'+ 'splicing_factor_call'
for entry in dataset_name_list:
title = title +'\t'+ entry + '-functional-attribute' +'\t'+ entry + '-uniprot-attribute' +'\t'+ entry +'-GE-change' +'\t'+ entry +'-dI'
data.write(title +'\t'+ 'common-hits' + '\n')
for key in aspire_event_db2:
affygene = key[0]; exon_set1 = key[1]; exon_set2 = key[2]
if affygene in annotate_db: splicing_factor_call = annotate_db[affygene].RNAProcessing()
else: splicing_factor_call = ''
try:
event_call = event_annotation_db[key][0]
symbol = event_annotation_db[key][1]
description = event_annotation_db[key][2]
except KeyError:
event_call = ''; symbol = ''; description = ''
values = affygene +'\t'+ symbol +'\t'+ description +'\t'+ exon_set1 +'\t'+ exon_set2 +'\t'+ event_call +'\t'+ splicing_factor_call
x=0
for entry in aspire_event_db2[key]:
for info in entry: values = values +'\t'+ str(info)
if entry[-1] != 0: x +=1
values = values +'\t'+ str(x) + '\n'
if include_all_other_genes == 'no':
if x>0: data.write(values)
else: data.write(values)
data.close()
def exportTransitResults(array_group_list,array_raw_group_values,array_group_db,avg_const_exp_db,adj_fold_dbase,exon_db,dataset_name,apt_dir):
"""Export processed raw expression values (e.g. add global fudge factor or eliminate probe sets based on filters) to txt files
for analysis with MiDAS"""
#array_group_list contains group names in order of analysis
#array_raw_group_values contains expression values for the x number of groups in above list
#array_group_db key is the group name and values are the list of array names
#avg_const_exp_db contains the average expression values for all arrays for all constitutive probesets, with gene as the key
ordered_array_header_list=[]
for group in array_group_list: ###contains the correct order for each group
for array_id in array_group_db[group]:
ordered_array_header_list.append(str(array_id))
ordered_exp_val_db = {} ###new dictionary containing all expression values together, but organized based on group
probeset_affygene_db = {} ###lists all altsplice probesets and corresponding affygenes
for probeset in array_raw_group_values:
try:
include_probeset = 'yes'
###Examines user input parameters for inclusion of probeset types in the analysis
if include_probeset == 'yes':
if probeset in adj_fold_dbase: ###indicates that this probeset is analyzed for splicing (e.g. has a constitutive probeset)
for group_val_list in array_raw_group_values[probeset]:
non_log_group_exp_vals = statistics.log_fold_conversion(group_val_list)
for val in non_log_group_exp_vals:
try: ordered_exp_val_db[probeset].append(str(val))
except KeyError: ordered_exp_val_db[probeset] = [str(val)]
affygene = exon_db[probeset].GeneID()
try: probeset_affygene_db[affygene].append(probeset)
except KeyError: probeset_affygene_db[affygene] = [probeset]
except KeyError:
###Indicates that the expression dataset file was not filtered for whether annotations exist in the exon annotation file
###In that case, just ignore the entry
null = ''
gene_count = 0
ordered_gene_val_db={}
    for affygene in avg_const_exp_db: ###now, add all constitutive gene level expression values (only per analyzed gene)
if affygene in probeset_affygene_db: ###ensures we only include gene data where there are altsplice examined probesets
non_log_ordered_exp_const_val = statistics.log_fold_conversion(avg_const_exp_db[affygene])
gene_count+=1
for val in non_log_ordered_exp_const_val:
try: ordered_gene_val_db[affygene].append(str(val))
except KeyError: ordered_gene_val_db[affygene] = [str(val)]
convert_probesets_to_numbers={}
convert_affygene_to_numbers={}; array_type = 'junction'
probeset_affygene_number_db={}; x=0; y=0
for affygene in probeset_affygene_db:
x+=1; y = x ###each affygene has a unique number, from other affygenes and probesets and probesets count up from each affygene
x_copy = x
example_gene_probeset = probeset_affygene_db[affygene][0]
#if exon_db[example_gene_probeset].ArrayType() == 'exon': x_copy = exon_db[example_gene_probeset].SecondaryGeneID()
if x_copy not in exon_db:
convert_affygene_to_numbers[affygene] = str(x_copy)
else: print affygene, x_copy,'new numeric for MIDAS already exists as a probeset ID number'; kill
for probeset in probeset_affygene_db[affygene]:
y = y+1; y_copy = y
if exon_db[probeset].ArrayType() == 'exon':
y_copy = probeset ### Only appropriate when the probeset ID is a number
array_type = 'exon'
convert_probesets_to_numbers[probeset] = str(y_copy)
try: probeset_affygene_number_db[str(x_copy)].append(str(y_copy))
except KeyError: probeset_affygene_number_db[str(x_copy)] = [str(y_copy)]
x=y
metafile = 'AltResults/MIDAS/meta-'+dataset_name[0:-1]+'.txt'
data1 = export.createExportFile(metafile,'AltResults/MIDAS')
title = 'probeset_id\ttranscript_cluster_id\tprobeset_list\tprobe_count\n'
data1.write(title)
for affygene in probeset_affygene_number_db:
probeset_list = probeset_affygene_number_db[affygene]; probe_number = str(len(probeset_list)*6)
probeset_list = [string.join(probeset_list,' ')]
probeset_list.append(affygene); probeset_list.append(affygene); probeset_list.reverse(); probeset_list.append(probe_number)
probeset_list = string.join(probeset_list,'\t'); probeset_list=probeset_list+'\n'
data1.write(probeset_list)
data1.close()
junction_exp_file = 'AltResults/MIDAS/'+array_type+'-exp-'+dataset_name[0:-1]+'.txt'
fn2=filepath(junction_exp_file)
data2 = open(fn2,'w')
ordered_array_header_list.reverse(); ordered_array_header_list.append('probeset_id'); ordered_array_header_list.reverse()
title = string.join(ordered_array_header_list,'\t')
data2.write(title+'\n')
for probeset in ordered_exp_val_db:
probeset_number = convert_probesets_to_numbers[probeset]
exp_values = ordered_exp_val_db[probeset]; exp_values.reverse(); exp_values.append(probeset_number); exp_values.reverse()
exp_values = string.join(exp_values,'\t'); exp_values = exp_values +'\n'
data2.write(exp_values)
data2.close()
gene_exp_file = 'AltResults/MIDAS/gene-exp-'+dataset_name[0:-1]+'.txt'
fn3=filepath(gene_exp_file)
data3 = open(fn3,'w')
title = string.join(ordered_array_header_list,'\t')
data3.write(title+'\n')
for affygene in ordered_gene_val_db:
try: affygene_number = convert_affygene_to_numbers[affygene]
except KeyError: print len(convert_affygene_to_numbers), len(ordered_gene_val_db); kill
exp_values = ordered_gene_val_db[affygene]; exp_values.reverse(); exp_values.append(affygene_number); exp_values.reverse()
exp_values = string.join(exp_values,'\t'); exp_values = exp_values +'\n'
data3.write(exp_values)
data3.close()
exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,'new')
coversionfile = 'AltResults/MIDAS/probeset-conversion-'+dataset_name[0:-1]+'.txt'
fn5=filepath(coversionfile)
data5 = open(fn5,'w')
title = 'probeset\tprobeset_number\n'; data5.write(title)
for probeset in convert_probesets_to_numbers: ###contains the correct order for each group
probeset_number = convert_probesets_to_numbers[probeset]
values = probeset+'\t'+probeset_number+'\n'
data5.write(values)
data5.close()
"""
### This code is obsolete... used before AltAnalyze could connect to APT directly.
commands = 'AltResults/MIDAS/commands-'+dataset_name[0:-1]+'.txt'
data = export.createExportFile(commands,'AltResults/MIDAS')
path = filepath('AltResults/MIDAS'); path = string.replace(path,'\\','/'); path = 'cd '+path+'\n\n'
metafile = 'meta-'+dataset_name[0:-1]+'.txt'
junction_exp_file = array_type+'-exp-'+dataset_name[0:-1]+'.txt'
gene_exp_file = 'gene-exp-'+dataset_name[0:-1]+'.txt'
celfiles = 'celfiles-'+dataset_name[0:-1]+'.txt'
command_line = 'apt-midas -c '+celfiles+' -g '+gene_exp_file+' -e '+junction_exp_file+' -m '+metafile+' -o '+dataset_name[0:-1]+'-output'
data.write(path); data.write(command_line); data.close()
"""
status = runMiDAS(apt_dir,array_type,dataset_name,array_group_list,array_group_db)
return status
def exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,type):
celfiles = 'AltResults/MIDAS/celfiles-'+dataset_name[0:-1]+'.txt'
fn4=filepath(celfiles)
data4 = open(fn4,'w')
if type == 'old': cel_files = 'cel_file'
elif type == 'new': cel_files = 'cel_files'
title = cel_files+'\tgroup_id\n'; data4.write(title)
for group in array_group_list: ###contains the correct order for each group
for array_id in array_group_db[group]:
values = str(array_id) +'\t'+ str(group) +'\n'
data4.write(values)
data4.close()
def getAPTDir(apt_fp):
###Determine if APT has been set to the right directory and add the analysis_type to the filename
    if 'bin' not in apt_fp: ###This directory contains the C++ programs that we wish to call
if 'apt' in apt_fp: ###This directory is the parent directory to 'bin'
apt_fp = apt_fp+'/'+'bin'
elif 'Affymetrix Power Tools' in apt_fp: ###The user selected the parent directory
dir_list = read_directory(apt_fp); versions = [] ###See what folders are in this directory (e.g., specific APT versions)
for folder in dir_list: ###If there are multiple versions
if 'apt' in folder:
version = string.replace(folder,'apt-','')
version = string.split(version,'.'); version_int_list = []
try:
for val in version: version_int_list.append(int(val))
versions.append([version_int_list,folder])
except Exception:
versions.append([folder,folder]) ### arbitrarily choose an APT version if multiple exist and the folder name does not conform to the above
if len(versions)>0:
###By making the versions indexed by the integer list value of the version, we can grab the latest version
versions.sort(); apt_fp = apt_fp+'/'+versions[-1][1]+'/'+'bin' ###Add the full path to the most recent version
return apt_fp
def runMiDAS(apt_dir,array_type,dataset_name,array_group_list,array_group_db):
if '/bin' in apt_dir: apt_file = apt_dir +'/apt-midas' ### if the user selects an APT directory
elif os.name == 'nt':
import platform
if '32bit' in platform.architecture(): apt_file = apt_dir + '/PC/32bit/apt-midas'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/PC/64bit/apt-midas'
elif 'darwin' in sys.platform: apt_file = apt_dir + '/Mac/apt-midas'
elif 'linux' in sys.platform:
import platform
if '32bit' in platform.architecture(): apt_file = apt_dir + '/Linux/32bit/apt-midas'
elif '64bit' in platform.architecture(): apt_file = apt_dir + '/Linux/64bit/apt-midas'
apt_file = filepath(apt_file)
### Each input file for MiDAS requires a full file path, so get the parent path
midas_input_dir = 'AltResults/MIDAS/'
path=filepath(midas_input_dir)
    ### Remotely connect to the previously verified APT C++ midas program and run analysis
metafile = path + 'meta-'+dataset_name[0:-1]+'.txt'
exon_or_junction_file = path + array_type+'-exp-'+dataset_name[0:-1]+'.txt'
gene_exp_file = path + 'gene-exp-'+dataset_name[0:-1]+'.txt'
celfiles = path + 'celfiles-'+dataset_name[0:-1]+'.txt'
output_file = path + dataset_name[0:-1]+'-output'
### Delete the output folder if it already exists (may cause APT problems)
delete_status = export.deleteFolder(output_file)
try:
import subprocess
retcode = subprocess.call([
apt_file, "--cel-files", celfiles,"-g", gene_exp_file, "-e", exon_or_junction_file,
"-m", metafile, "-o", output_file])
if retcode: status = 'failed'
else: status = 'run'
except NameError: status = 'failed'
if status == 'failed':
try:
### Try running the analysis with old MiDAS file headers and command
exportMiDASArrayNames(array_group_list,array_group_db,dataset_name,'old')
import subprocess
retcode = subprocess.call([
apt_file, "-c", celfiles,"-g", gene_exp_file, "-e", exon_or_junction_file,
"-m", metafile, "-o", output_file])
if retcode: status = 'failed'
else: status = 'run'
except Exception: status = 'failed'
if status == 'failed': print "apt-midas failed"
else: print "apt-midas run successfully"
return status
def importMidasOutput(dataset_name):
coversionfile = 'AltResults/MIDAS/probeset-conversion-'+dataset_name[0:-1]+'.txt'
#print "Looking for", coversionfile
fn=filepath(coversionfile); x=0; probeset_conversion_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x==0: x=1 ###Occurs for the header line
else:
probeset,probeset_number = string.split(data,'\t')
probeset_conversion_db[probeset_number] = probeset
midas_results = 'AltResults/MIDAS/'+dataset_name[:-1]+'-output'+'/midas.pvalues.txt'
fn=filepath(midas_results); x=0; midas_db={}
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if data[0] == '#': continue
elif x==0: x=1 ###Occurs for the header line
else:
t = string.split(data,'\t')
try: probeset_number,geneid,p = t
except ValueError: print t;kill
try: p = float(p)
            except ValueError: p = 1.000 ### "-1.#IND" can occur when the constitutive and probeset are the same
probeset = probeset_conversion_db[probeset_number]
midas_db[probeset] = p
return midas_db
def combineRawSpliceResults(species,analysis_method):
import_dir = '/AltResults/RawSpliceData/'+species+'/'+analysis_method
g = GrabFiles(); g.setdirectory(import_dir)
    files_to_merge = g.searchdirectory('combined') ###'combined' acts as an exclusion term: searchdirectory returns files NOT containing it
headers =[]; combined_data={}
for filename in files_to_merge:
fn=filepath(filename); x=0
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
t = string.split(data,'\t')
if x==0:
headers += t[1:]
x=1 ###Occurs for the header line
else:
values = t; values = values[1:]; key = t[0]
try: combined_data[key]+=values
except KeyError: combined_data[key]=values
    max_len=0 ###Some files will contain data that others won't... normalize for this, so we only include rows where there is data in all files examined
for key in combined_data:
if len(combined_data[key])>max_len: max_len = len(combined_data[key])
combined_data2 = {}; k=0; j=0
for key in combined_data:
#print combined_data[key];kill
        ### If '1.0' is in the list, then there was only one constitutive probeset that was 'expressed' in that dataset: thus comparisons are not likely valid
count = list.count(combined_data[key],'1.0')
if len(combined_data[key])==max_len and count <3: combined_data2[key] = combined_data[key]
elif len(combined_data[key])!=max_len: k+=1#; print key,max_len, len(combined_data[key]),combined_data[key]; kill
elif count >2: j+=1
combined_data = combined_data2
#print k,j
export_file = import_dir[1:]+'/combined.txt'
fn=filepath(export_file);data = open(fn,'w')
title = string.join(['gene-probeset']+headers,'\t')+'\n'; data.write(title)
for key in combined_data:
values = string.join([key]+combined_data[key],'\t')+'\n'; data.write(values)
data.close()
print "exported",len(combined_data),"to",export_file
def import_annotations(filename,array_type):
import ExonAnalyze_module
fn=filepath(filename); annotate_db = {}; x = 0
if array_type == 'AltMouse':
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
try: affygene, description, ll_id, symbol, rna_processing_annot = string.split(data,'\t')
except ValueError: affygene, description, ll_id, symbol = string.split(data,'\t'); splicing_annotation = ''
if '"' in description: null,description,null = string.split(description,'"')
rna_processing_annot =''
y = ExonAnalyze_module.GeneAnnotationData(affygene, description, symbol, ll_id, rna_processing_annot)
annotate_db[affygene] = y
else:
for line in open(fn,'rU').xreadlines():
data = cleanUpLine(line)
if x == 0: x = 1
else:
rna_processing_annot=''
try: ensembl, description, symbol, rna_processing_annot = string.split(data,'\t')
except ValueError: ensembl, description, symbol = string.split(data,'\t')
y = ExonAnalyze_module.GeneAnnotationData(ensembl, description, symbol, ensembl, rna_processing_annot)
annotate_db[ensembl] = y
return annotate_db
if __name__ == '__main__':
array_type = 'exon'
a = 'Mm'; b = 'Hs'
e = 'ASPIRE'; f = 'linearregres'; g = 'ANOVA'; h = 'splicing-index'
analysis_method = h
species = b ### edit this
if array_type != 'AltMouse': gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
annotate_db = import_annotations(gene_annotation_file,array_type)
number_events_analyzed = 0
analyzing_genes = 'no'
root_dir = 'C:/Users/Nathan Salomonis/Desktop/Gladstone/1-datasets/Combined-GSE14588_RAW/junction/' ### edit this
a = root_dir+'AltResults/AlternativeOutput/Hs_Junction_d14_vs_d7.p5_average-ASPIRE-exon-inclusion-results.txt' ### edit this
b = root_dir+'AltResults/AlternativeOutput/Hs_Junction_d14_vs_d7.p5_average-splicing-index-exon-inclusion-results.txt' ### edit this
aspire_output_list = [a,b]
compareAltAnalyzeResults(aspire_output_list,annotate_db,number_events_analyzed,analyzing_genes,analysis_method,array_type,root_dir)
#dataset_name = 'test.'; apt_dir = 'AltDatabase/affymetrix/APT'
#aspire_output_gene_list = ['AltResults/AlternativeOutput/Hs_Exon_CS-d40_vs_hESC-d0.p5_average-splicng_index-exon-inclusion-GENE-results.txt', 'AltResults/AlternativeOutput/Hs_Exon_Cyt-NP_vs_Cyt-ES.p5_average-splicing_index-exon-inclusion-GENE-results.txt', 'AltResults/AlternativeOutput/Hs_Exon_HUES6-NP_vs_HUES6-ES.p5_average-splicing_index-exon-inclusion-GENE-results.txt']
#runMiDAS(apt_dir,array_type,dataset_name,{},()); sys.exit()
#midas_db = importMidasOutput(dataset_name) | apache-2.0 |
Akshay0724/scikit-learn | examples/decomposition/plot_kernel_pca.py | 350 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | sklearn/tests/test_isotonic.py | 16 | 11166 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
if __name__ == "__main__":
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Fireblend/scikit-learn | examples/svm/plot_svm_kernels.py | 326 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
google/lasr | third_party/softras/soft_renderer/functional/directional_lighting.py | 1 | 1210 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def directional_lighting(light, normals, light_intensity=0.5, light_color=(1,1,1),
light_direction=(0,1,0)):
# normals: [nb, :, 3]
device = light.device
if isinstance(light_color, tuple) or isinstance(light_color, list):
light_color = torch.tensor(light_color, dtype=torch.float32, device=device)
elif isinstance(light_color, np.ndarray):
light_color = torch.from_numpy(light_color).float().to(device)
if isinstance(light_direction, tuple) or isinstance(light_direction, list):
light_direction = torch.tensor(light_direction, dtype=torch.float32, device=device)
elif isinstance(light_direction, np.ndarray):
light_direction = torch.from_numpy(light_direction).float().to(device)
if light_color.ndimension() == 1:
light_color = light_color[None, :]
if light_direction.ndimension() == 1:
light_direction = light_direction[None, :] #[nb, 3]
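    # Lambertian diffuse term: dot product of surface normals with the light direction,
    # clamped at zero (F.relu) so back-facing surfaces receive no directional contribution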
cosine = F.relu(torch.sum(normals * light_direction, dim=2)) #[]
light += light_intensity * (light_color[:, None, :] * cosine[:, :, None])
return light #[nb, :, 3] | apache-2.0 |
Akshay0724/scikit-learn | sklearn/tests/test_kernel_ridge.py | 339 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
Fireblend/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 308 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
alvations/lightfm | tests/utils.py | 11 | 2205 | import numpy as np
from sklearn.metrics import roc_auc_score
def precision_at_k(model, ground_truth, k, user_features=None, item_features=None):
"""
Measure precision at k for model and ground truth.
Arguments:
- lightFM instance model
- sparse matrix ground_truth (no_users, no_items)
- int k
Returns:
- float precision@k
"""
ground_truth = ground_truth.tocsr()
no_users, no_items = ground_truth.shape
pid_array = np.arange(no_items, dtype=np.int32)
precisions = []
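    # For each user, score every item: uid_array repeats the user id so that
    # model.predict receives aligned (user_id, item_id) arrays of equal length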
for user_id, row in enumerate(ground_truth):
uid_array = np.empty(no_items, dtype=np.int32)
uid_array.fill(user_id)
predictions = model.predict(uid_array, pid_array,
user_features=user_features,
item_features=item_features,
num_threads=4)
top_k = set(np.argsort(-predictions)[:k])
true_pids = set(row.indices[row.data == 1])
if true_pids:
precisions.append(len(top_k & true_pids) / float(k))
return sum(precisions) / len(precisions)
def full_auc(model, ground_truth, user_features=None, item_features=None):
"""
Measure AUC for model and ground truth on all items.
Arguments:
- lightFM instance model
- sparse matrix ground_truth (no_users, no_items)
Returns:
- float AUC
"""
ground_truth = ground_truth.tocsr()
no_users, no_items = ground_truth.shape
pid_array = np.arange(no_items, dtype=np.int32)
scores = []
for user_id, row in enumerate(ground_truth):
uid_array = np.empty(no_items, dtype=np.int32)
uid_array.fill(user_id)
predictions = model.predict(uid_array, pid_array,
user_features=user_features,
item_features=item_features,
num_threads=4)
true_pids = row.indices[row.data == 1]
grnd = np.zeros(no_items, dtype=np.int32)
grnd[true_pids] = 1
if len(true_pids):
scores.append(roc_auc_score(grnd, predictions))
return sum(scores) / len(scores)
| apache-2.0 |
solashirai/edx-platform | lms/djangoapps/course_api/blocks/tests/test_api.py | 7 | 4385 | """
Tests for Blocks api.py
"""
from django.test.client import RequestFactory
from course_blocks.tests.helpers import EnableTransformerRegistryMixin
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..api import get_blocks
class TestGetBlocks(EnableTransformerRegistryMixin, SharedModuleStoreTestCase):
"""
Tests for the get_blocks function
"""
@classmethod
def setUpClass(cls):
super(TestGetBlocks, cls).setUpClass()
cls.course = SampleCourseFactory.create()
# hide the html block
cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestGetBlocks, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_basic(self):
blocks = get_blocks(self.request, self.course.location, self.user)
self.assertEquals(blocks['root'], unicode(self.course.location))
# subtract for (1) the orphaned course About block and (2) the hidden Html block
self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
self.assertNotIn(unicode(self.html_block.location), blocks['blocks'])
def test_no_user(self):
blocks = get_blocks(self.request, self.course.location)
self.assertIn(unicode(self.html_block.location), blocks['blocks'])
def test_access_before_api_transformer_order(self):
"""
Tests the order of transformers: access checks are made before the api
transformer is applied.
"""
blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
vertical_descendants = blocks['blocks'][unicode(vertical_block.location)]['descendants']
self.assertIn(unicode(problem_block.location), vertical_descendants)
self.assertNotIn(unicode(self.html_block.location), vertical_descendants)
def test_sub_structure(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
blocks = get_blocks(self.request, sequential_block.location, self.user)
self.assertEquals(blocks['root'], unicode(sequential_block.location))
self.assertEquals(len(blocks['blocks']), 5)
for block_type, block_name, is_inside_of_structure in (
('vertical', 'vertical_y1a', True),
('problem', 'problem_y1a_1', True),
('chapter', 'chapter_y', False),
('sequential', 'sequential_x1', False),
):
block = self.store.get_item(self.course.id.make_usage_key(block_type, block_name))
if is_inside_of_structure:
self.assertIn(unicode(block.location), blocks['blocks'])
else:
self.assertNotIn(unicode(block.location), blocks['blocks'])
def test_filtering_by_block_types(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
# not filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user, requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 5)
found_not_problem = False
for block in blocks['blocks'].itervalues():
if block['type'] != 'problem':
found_not_problem = True
self.assertTrue(found_not_problem)
# filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user,
block_types_filter=['problem'], requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 3)
for block in blocks['blocks'].itervalues():
self.assertEqual(block['type'], 'problem')
| agpl-3.0 |
martin1/thesis | src/Forecasting/ma.py | 1 | 7645 | '''
Created on Aug 7, 2013
@author: martin
'''
from pandas.tseries.index import date_range
import datetime
from pandas.stats.moments import rolling_mean, ewma
import matplotlib.pyplot as plt
from util import get_sysprice_list
from pandas.core.series import Series
from pandas.tools.merge import concat
from matplotlib.pyplot import legend
from sklearn.metrics.metrics import mean_absolute_error, mean_squared_error
from Forecasting.mape import mean_absolute_percentage_error
from Forecasting.util import *
'''
ts - time series in pandas.Series format
forecast_window - number of days to forecast time series forward from its end
'''
'''def forecast_MA(ts, periods_of_data_to_use=7, days_to_forecast=18):
#find last time in time series
last_index = ts.last_valid_index()
if not last_index.time(): #in case daily/weekly/monthly data
#create forecast index
forecast_index = date_range(last_index + datetime.timedelta(days=1), periods=days_to_forecast)
else: #in case hourly data
#create forecast index
forecast_index = date_range(last_index + datetime.timedelta(hours=1), periods=days_to_forecast)
#days_to_forecast*=24
for item in forecast_index:print item
#Make my own rolling mean forecaster
#Extract only that data which is necessary to make the first moving average calculation
MA_data_series = ts.tail(periods_of_data_to_use)
forecast = Series()
for item in MA_data_series:print item
for time in forecast_index:
#forecasted value is last value in rolling_mean list - all others are NaN because of forecast window length
forecast_value = rolling_mean(MA_data_series, periods_of_data_to_use).loc[-1]
print forecast_value
#remove 1-st value from data because its not needed for next forecasted value
MA_data_series = MA_data_series[1:]
#Append forecasted value to data because forecast is data for next iteration MA
MA_data_series = concat([MA_data_series, Series(forecast_value, index=[time])])
forecast = concat([forecast, Series(forecast_value, index=[time])])
#MA_data_series is now the forecast, because all original values have been "Rolled over" (e.g. removed)
f = open('/home/martin/forecast.txt', 'w')
for item in forecast:
print>>f, item
f.close()
return forecast
#############################################################################################################
days_to_forecast = 7
periods_of_data_to_use = 11
ts = get_sysprice_list('2013-05-19 00:00:00', '2013-06-18 23:00:00', frequency='daily')
f = open('/home/martin/ts.txt', 'w')
for item in ts:
print>>f, item
f.close()
forecast_ts = get_sysprice_list('2011-01-01 00:00:00', '2013-05-31 23:00:00', frequency='daily')
f2 = forecast_MA(forecast_ts, 2, 18)
f5 = forecast_MA(forecast_ts, 5, 18)
f7 = forecast_MA(forecast_ts, 7, 18)
f30 = forecast_MA(forecast_ts, 30, 18)
f60 = forecast_MA(forecast_ts, 60, 18)
f180 = forecast_MA(forecast_ts, 180, 18)
#############################################
#Calculate error metrics
#############################################
data = [item for item in ts.tail(18)]
print data
#data = data[:7]
print data
f_data = [item for item in f30]
print "MAE: ", round(mean_absolute_error(data, f_data),2)
print "MSE: ", round(mean_squared_error(data, f_data),2)
print mean_absolute_percentage_error(data, f_data)
'''
class Moving_Average_Forecaster(Forecaster):
    '''Moving average forecaster supporting simple ('ma') and exponentially weighted ('ewma') averages'''
def __init__(self, training_ts, forecast_method='ma'):
self.forecast_method = forecast_method
Forecaster.__init__(self, training_ts)
def forecast(self, forecast_start_str, forecast_period_in_days, periods_of_data_to_use):
'''Perform the forecast and return forecast as pandas Series object'''
#create forecast index
forecast_index = date_range(forecast_start_str, periods=forecast_period_in_days)
#Extract only that data which is necessary to make the first moving average calculation
data_series = self.training_ts.tail(periods_of_data_to_use)
forecast = Series()
for time in forecast_index:
#the forecast value is the last value in the rolling_mean result - all others are NaN because of the window length
if self.forecast_method == 'ma':
#Forecast using the simple moving average
forecast_value = rolling_mean(data_series, periods_of_data_to_use).iloc[-1]
elif self.forecast_method == 'ewma':
#forecast using the exponentially weighted moving average
forecast_value = ewma(data_series, span=periods_of_data_to_use).iloc[-1]
#print forecast_value
#remove the first value from the data because it's not needed for the next forecast value
data_series = data_series[1:]
#Append the forecast value to the data because the forecast becomes the input for the next iteration's MA
data_series = concat([data_series, Series(forecast_value, index=[time])])
forecast = concat([forecast, Series(forecast_value, index=[time])])
return forecast
#return forecast_ewma(self.training_ts, span=periods_of_data_to_use, forecast_days=forecast_period_in_days)
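# Illustrative sketch (added, not part of the original thesis code): the same
# iterative moving-average forecast as Moving_Average_Forecaster.forecast,
# written against the current pandas API. The function name and the example
# data below are my own.
import pandas as pd
def simple_ma_forecast(ts, window, horizon):
    '''Forecast `horizon` daily steps by repeatedly averaging the last
    `window` observations and feeding each forecast back in as new data.'''
    history = ts.tail(window).copy()
    future_index = pd.date_range(ts.index[-1] + pd.Timedelta(days=1), periods=horizon)
    values = []
    for t in future_index:
        value = history.tail(window).mean()  # last element of the rolling mean
        history = pd.concat([history, pd.Series([value], index=[t])])
        values.append(value)
    return pd.Series(values, index=future_index)
# Example with made-up data:
#     ts = pd.Series(range(30), index=pd.date_range('2013-05-01', periods=30))
#     simple_ma_forecast(ts, window=7, horizon=7)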
#############################################################
#simple moving average
#calculate 2,3,5, 7,14,21,30,60,180 day MA-s
spans = [2,3,5,7,14,21,30,60,90,180]
forecast_errors = dict()
output = list()
fc = Moving_Average_Forecaster(training_ts=ds, forecast_method='ma')
'''
for i in spans:
errors = list()
for n in range(0, len(ev_ds)):
errors.append(get_errors(ev_ds[n].values, fc.forecast(ev_periods[n][0], forecast_period_in_days=7, periods_of_data_to_use=i).values))
forecast_errors['sma_'+str(i)] = errors
#output.append(forecast_errors)
output.append("Best forecast estimation: "+ str(get_best_forecast(forecast_errors, type='sum')))
output.append("-------------------")
output.append("Forecasts for EV periods 1, 2 and 3")
output.append("-------------------")
errors = dict()
for i in spans:
name = "sma_" + str(i)
#string = name
errs = list()
for n in range(0, len(ev_ds)):
#string += " " +str(get_errors(ev_ds[n].values, fc.forecast(ev_periods[n][0], forecast_period_in_days=7, periods_of_data_to_use=i).values))
errs.append(get_errors(ev_ds[n].values, fc.forecast(ev_periods[n][0], forecast_period_in_days=7, periods_of_data_to_use=i).values))
errors[name] = errs
#output.append(string)
for item in get_top_ev_forecasts(errors):output.append(item)
output.append("-------------------")
output.append("Actual forecast")
output.append("-------------------")
errors = dict()
for i in spans:
name = "sma_" + str(i)
errors[name] = get_errors(actual_data.values, fc.forecast(forecast_dataset[0], forecast_period_in_days=7, periods_of_data_to_use=i).values)
#output.append(name + " " +str(get_errors(actual_data.values, fc.forecast(forecast_dataset[0], forecast_period_in_days=7, periods_of_data_to_use=i).values)))
for item in get_top_actual_forecasts(errors):output.append(item)
write_to_file("/home/martin/dev/git/masters_thesis/Files/sma.txt", output)
'''
actual_data = get_sysprice_list('2013-06-10 00:00:00', '2013-06-16 23:00:00', frequency='daily')
ma_14 = fc.forecast('2013-06-10', forecast_period_in_days=7, periods_of_data_to_use=14)
#ma_21 = fc.forecast('2013-06-10', forecast_period_in_days=7, periods_of_data_to_use=21)
#plot_forecasts([ma_14, ma_21, actual_data], legend_data=['14 Day Moving Average', '21 Day Moving Average', 'Actual Data'])
| gpl-3.0 |
Fireblend/scikit-learn | sklearn/utils/tests/test_random.py | 228 | 7344 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_samples
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired from test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# number of permutations. However, it works with sampling algorithms
# that do not provide a random permutation of the subset of integers.
n_expected = combinations(n_population, n_samples, exact=True)
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
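# Illustrative note (added, not part of the original tests): with
# n_population=10 the number of distinct draws the loop above can observe is
# C(10, n_samples); for example
#     >>> from scipy.special import comb
#     >>> comb(10, 3, exact=True)
#     120
# so 10000 trials are normally plenty to see every combination at least once.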
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
DistrictDataLabs/yellowbrick | yellowbrick/target/feature_correlation.py | 1 | 11670 | # yellowbrick.classifier.feature_correlation
# Feature correlation to dependent variable visualizer.
#
# Author: Zijie (ZJ) Poh
# Created: Wed Jul 29 15:30:40 2018 -0700
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: feature_correlation.py [33aec16] [email protected] $
"""
Feature Correlation to Dependent Variable Visualizer.
"""
##########################################################################
# Imports
##########################################################################
import numpy as np
from yellowbrick.utils import is_dataframe
from yellowbrick.target.base import TargetVisualizer
from yellowbrick.exceptions import YellowbrickValueError, YellowbrickWarning
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import mutual_info_regression
from scipy.stats import pearsonr
##########################################################################
# Supported Correlation Computations
##########################################################################
CORRELATION_LABELS = {
"pearson": "Pearson Correlation",
"mutual_info-regression": "Mutual Information",
"mutual_info-classification": "Mutual Information",
}
CORRELATION_METHODS = {
"mutual_info-regression": mutual_info_regression,
"mutual_info-classification": mutual_info_classif,
}
##########################################################################
# Class Feature Correlation
##########################################################################
class FeatureCorrelation(TargetVisualizer):
"""
Displays the correlation between features and dependent variables.
This visualizer can be used side-by-side with
``yellowbrick.features.JointPlotVisualizer`` that plots a feature
against the target and shows the distribution of each via a
histogram on each axis.
Parameters
----------
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
method : str, default: 'pearson'
The method to calculate correlation between features and target.
Options include:
- 'pearson', which uses ``scipy.stats.pearsonr``
- 'mutual_info-regression', which uses ``mutual_info_regression``
from ``sklearn.feature_selection``
- 'mutual_info-classification', which uses ``mutual_info_classif``
from ``sklearn.feature_selection``
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
features is None, feature names are selected as the column names.
sort : boolean, default: False
If false, the features are not sorted in the plot; otherwise
features are sorted in ascending order of correlation.
feature_index : list,
A list of feature index to include in the plot.
feature_names : list of feature names
A list of feature names to include in the plot.
Must have labels or the fitted data is a DataFrame with column names.
If feature_index is provided, feature_names will be ignored.
color: string
Specify color for barchart
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
features_ : np.array
The feature labels
scores_ : np.array
Correlation between features and dependent variable.
Examples
--------
>>> viz = FeatureCorrelation()
>>> viz.fit(X, y)
>>> viz.show()
"""
def __init__(
self,
ax=None,
method="pearson",
labels=None,
sort=False,
feature_index=None,
feature_names=None,
color=None,
**kwargs
):
super(FeatureCorrelation, self).__init__(ax, **kwargs)
self.correlation_labels = CORRELATION_LABELS
self.correlation_methods = CORRELATION_METHODS
if method not in self.correlation_labels:
raise YellowbrickValueError(
"Method {} not implement; choose from {}".format(
method, ", ".join(self.correlation_labels)
)
)
# Parameters
self.sort = sort
self.color = color
self.method = method
self.labels = labels
self.feature_index = feature_index
self.feature_names = feature_names
def fit(self, X, y, **kwargs):
"""
Fits the estimator to calculate feature correlation to
dependent variable.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Keyword arguments passed to the fit method of the estimator.
Returns
-------
self : visualizer
The fit method must always return self to support pipelines.
"""
self._create_labels_for_features(X)
self._select_features_to_plot(X)
# Calculate Features correlation with target variable
if self.method == "pearson":
self.scores_ = np.array(
[pearsonr(x, y, **kwargs)[0] for x in np.asarray(X).T]
)
else:
self.scores_ = np.array(
self.correlation_methods[self.method](X, y, **kwargs)
)
# If feature indices are given, plot only the given features
if self.feature_index:
self.scores_ = self.scores_[self.feature_index]
self.features_ = self.features_[self.feature_index]
# Sort features by correlation
if self.sort:
sort_idx = np.argsort(self.scores_)
self.scores_ = self.scores_[sort_idx]
self.features_ = self.features_[sort_idx]
self.draw()
return self
def draw(self):
"""
Draws the feature correlation to dependent variable, called from fit.
"""
pos = np.arange(self.scores_.shape[0]) + 0.5
self.ax.barh(pos, self.scores_, color=self.color)
# Set the labels for the bars
self.ax.set_yticks(pos)
self.ax.set_yticklabels(self.features_)
return self.ax
def finalize(self):
"""
Finalize the drawing setting labels and title.
"""
self.set_title("Features correlation with dependent variable")
self.ax.set_xlabel(self.correlation_labels[self.method])
self.ax.grid(False, axis="y")
def _create_labels_for_features(self, X):
"""
Create labels for the features
NOTE: this code is duplicated from MultiFeatureVisualizer
"""
if self.labels is None:
# Use column names if a dataframe
if is_dataframe(X):
self.features_ = np.array(X.columns)
# Otherwise use the column index as the labels
else:
_, ncols = X.shape
self.features_ = np.arange(0, ncols)
else:
self.features_ = np.array(self.labels)
def _select_features_to_plot(self, X):
"""
Select features to plot.
feature_index is always used as the filter and
if feature_names is supplied, a new feature_index
is computed from those names.
"""
if self.feature_index:
if self.feature_names:
raise YellowbrickWarning(
"Both feature_index and feature_names "
"are specified. feature_names is ignored"
)
if min(self.feature_index) < 0 or max(self.feature_index) >= X.shape[1]:
raise YellowbrickValueError("Feature index is out of range")
elif self.feature_names:
self.feature_index = []
features_list = self.features_.tolist()
for feature_name in self.feature_names:
try:
self.feature_index.append(features_list.index(feature_name))
except ValueError:
raise YellowbrickValueError("{} not in labels".format(feature_name))
##########################################################################
# Quick Method
##########################################################################
def feature_correlation(
X,
y,
ax=None,
method="pearson",
labels=None,
sort=False,
feature_index=None,
feature_names=None,
color=None,
show=True,
**kwargs
):
"""
Displays the correlation between features and dependent variables.
This visualizer can be used side-by-side with
yellowbrick.features.JointPlotVisualizer that plots a feature
against the target and shows the distribution of each via a
histogram on each axis.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
method : str, default: 'pearson'
The method to calculate correlation between features and target.
Options include:
- 'pearson', which uses ``scipy.stats.pearsonr``
- 'mutual_info-regression', which uses ``mutual_info_regression``
from ``sklearn.feature_selection``
- 'mutual_info-classification', which uses ``mutual_info_classif``
from ``sklearn.feature_selection``
labels : list, default: None
A list of feature names to use. If a DataFrame is passed to fit and
features is None, feature names are selected as the column names.
sort : boolean, default: False
If false, the features are not sorted in the plot; otherwise
features are sorted in ascending order of correlation.
feature_index : list,
A list of feature index to include in the plot.
feature_names : list of feature names
A list of feature names to include in the plot.
Must have labels or the fitted data is a DataFrame with column names.
If feature_index is provided, feature_names will be ignored.
color: string
Specify color for barchart
show: bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()`` however you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
visualizer : FeatureCorrelation
Returns the fitted visualizer.
"""
# Instantiate the visualizer
visualizer = FeatureCorrelation(
ax=ax,
method=method,
labels=labels,
sort=sort,
color=color,
feature_index=feature_index,
feature_names=feature_names,
**kwargs
)
# Fit and transform the visualizer (calls draw)
visualizer.fit(X, y, **kwargs)
if show:
visualizer.show()
else:
visualizer.finalize()
# Return the visualizer
return visualizer
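##########################################################################
# Illustrative usage (added, not part of the yellowbrick source): plot the
# Pearson correlation of each feature of a synthetic regression problem.
# The dataset and parameter values below are made up for demonstration.
##########################################################################
if __name__ == "__main__":
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_samples=200, n_features=6, noise=10,
                                     random_state=0)
    feature_correlation(X_demo, y_demo, method="pearson", sort=True)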
| apache-2.0 |
laszlocsomor/tensorflow | tensorflow/contrib/eager/python/examples/resnet50/resnet50_test.py | 4 | 8293 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests and benchmarks for the ResNet50 model, executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import tempfile
import time
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.resnet50 import resnet50
from tensorflow.contrib.summary import summary_test_util
from tensorflow.python.client import device_lib
from six.moves import xrange  # pylint: disable=redefined-builtin
def device_and_data_format():
return ('/gpu:0', 'channels_first') if tfe.num_gpus() else ('/cpu:0',
'channels_last')
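# Note (added comment, not in the original test): 'channels_first' (NCHW) is
# chosen on GPU because cuDNN convolution kernels are generally faster with
# that layout, while most TensorFlow CPU kernels only support 'channels_last'
# (NHWC).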
def random_batch(batch_size):
_, data_format = device_and_data_format()
shape = (3, 224, 224) if data_format == 'channels_first' else (224, 224, 3)
shape = (batch_size,) + shape
num_classes = 1000
images = tf.random_uniform(shape)
labels = tf.random_uniform(
[batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
one_hot = tf.one_hot(labels, num_classes)
return images, one_hot
def train_one_step(model, images, labels, optimizer):
def model_loss():
logits = model(images, training=True)
loss = tf.losses.softmax_cross_entropy(
logits=logits, onehot_labels=labels)
tf.contrib.summary.scalar(name='loss', tensor=loss)
return loss
optimizer.minimize(model_loss)
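# Note (added comment, not in the original test): the optimizer.minimize(
# model_loss) call above works under eager execution because minimize accepts
# a callable loss there; it evaluates model_loss, records the variables
# touched while doing so, and applies a single gradient step to them.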
class ResNet50Test(tf.test.TestCase):
def test_apply(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
with tf.device(device):
images, _ = random_batch(2)
output = model(images)
self.assertEqual((2, 1000), output.shape)
def test_apply_no_top(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False)
with tf.device(device):
images, _ = random_batch(2)
output = model(images)
output_shape = ((2, 2048, 1, 1)
if data_format == 'channels_first' else (2, 1, 1, 2048))
self.assertEqual(output_shape, output.shape)
def test_apply_with_pooling(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format, include_top=False, pooling='avg')
with tf.device(device):
images, _ = random_batch(2)
output = model(images)
self.assertEqual((2, 2048), output.shape)
def test_train(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
tf.train.get_or_create_global_step()
logdir = tempfile.mkdtemp()
with tf.contrib.summary.create_summary_file_writer(
logdir, max_queue=0,
name='t0').as_default(), tf.contrib.summary.always_record_summaries():
with tf.device(device):
optimizer = tf.train.GradientDescentOptimizer(0.1)
images, labels = random_batch(2)
train_one_step(model, images, labels, optimizer)
self.assertEqual(320, len(model.variables))
events = summary_test_util.events_from_logdir(logdir)
self.assertEqual(len(events), 2)
self.assertEqual(events[1].summary.value[0].tag, 'loss')
def test_no_garbage(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
with tf.device(device):
images, labels = random_batch(2)
gc.disable()
# Warm up. Note that this first run does create significant amounts of
# garbage to be collected. The hope is that this is a build-only effect,
# and a subsequent training loop will create nothing which needs to be
# collected.
train_one_step(model, images, labels, optimizer)
gc.collect()
previous_gc_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
for _ in range(2):
# Run twice to ensure that garbage that is created on the first
# iteration is no longer accessible.
train_one_step(model, images, labels, optimizer)
gc.collect()
# There should be no garbage requiring collection.
self.assertEqual(0, len(gc.garbage))
gc.set_debug(previous_gc_debug_flags)
gc.enable()
class MockIterator(object):
def __init__(self, tensors):
self._tensors = [tf.identity(x) for x in tensors]
def next(self):
return self._tensors
class ResNet50Benchmarks(tf.test.Benchmark):
def _train_batch_sizes(self):
"""Choose batch sizes based on GPU capability."""
for device in device_lib.list_local_devices():
if 'GPU:0' in device.name:
# Avoid OOM errors with larger batch sizes, which seem to cause errors
# later on even if caught.
#
# TODO(allenl): Base this on device memory; memory limit information
# during the test seems to exclude the amount TensorFlow has allocated,
# which isn't useful.
if 'K20' in device.physical_device_desc:
return (16,)
if 'P100' in device.physical_device_desc:
return (16, 32, 64)
return (16, 32)
def _report(self, label, start, num_iters, device, batch_size, data_format):
avg_time = (time.time() - start) / num_iters
dev = 'cpu' if 'cpu' in device else 'gpu'
name = '%s_%s_batch_%d_%s' % (label, dev, batch_size, data_format)
extras = {'examples_per_sec': batch_size / avg_time}
self.report_benchmark(
iters=num_iters, wall_time=avg_time, name=name, extras=extras)
def _force_gpu_sync(self):
# If this function is called in the context of a GPU device
# (e.g., inside a 'with tf.device("/gpu:0")' block)
# then this will force a copy from CPU->GPU->CPU, which forces
# a sync. This is a roundabout way, yes.
tf.constant(1.).cpu()
def benchmark_eager_apply(self):
device, data_format = device_and_data_format()
model = resnet50.ResNet50(data_format)
batch_size = 64
num_burn = 5
num_iters = 30
with tf.device(device):
images, _ = random_batch(batch_size)
for _ in xrange(num_burn):
model(images).cpu()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
model(images).cpu()
self._report('eager_apply', start, num_iters, device, batch_size,
data_format)
def _benchmark_eager_train(self, label, make_iterator):
device, data_format = device_and_data_format()
for batch_size in self._train_batch_sizes():
(images, labels) = random_batch(batch_size)
num_burn = 3
num_iters = 10
model = resnet50.ResNet50(data_format)
optimizer = tf.train.GradientDescentOptimizer(0.1)
with tf.device(device):
iterator = make_iterator((images, labels))
for _ in xrange(num_burn):
(images, labels) = iterator.next()
train_one_step(model, images, labels, optimizer)
self._force_gpu_sync()
gc.collect()
start = time.time()
for _ in xrange(num_iters):
(images, labels) = iterator.next()
train_one_step(model, images, labels, optimizer)
self._force_gpu_sync()
self._report(label, start, num_iters, device, batch_size, data_format)
def benchmark_eager_train(self):
self._benchmark_eager_train('eager_train', MockIterator)
def benchmark_eager_train_datasets(self):
def make_iterator(tensors):
with tf.device('/device:CPU:0'):
ds = tf.data.Dataset.from_tensors(tensors).repeat()
return tfe.Iterator(ds)
self._benchmark_eager_train('eager_train_dataset', make_iterator)
if __name__ == '__main__':
tfe.enable_eager_execution()
tf.test.main()
| apache-2.0 |
Akshay0724/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 66 | 7474 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components to guarantee the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows us to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
Euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
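# Illustrative check (added, not part of the original example):
# johnson_lindenstrauss_min_dim implements the bound quoted in the docstring,
# n_components >= 4 * log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3),
# so computing it by hand for, e.g., 500 samples and eps = 0.1 should agree:
n_check, eps_check = 500, 0.1
bound_check = 4 * np.log(n_check) / (eps_check ** 2 / 2 - eps_check ** 3 / 3)
print("manual bound: %d, johnson_lindenstrauss_min_dim: %d"
      % (np.ceil(bound_check),
         johnson_lindenstrauss_min_dim(n_check, eps=eps_check)))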
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
ningchi/scikit-learn | examples/plot_digits_pipe.py | 249 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
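# Note (added, not part of the original example): on recent scikit-learn
# versions the deprecated sklearn.grid_search module has been removed and the
# import above becomes
#     from sklearn.model_selection import GridSearchCV
# the rest of the script is unchanged.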
| bsd-3-clause |
google/lasr | third_party/PerceptualSimilarity/data/image_folder.py | 2 | 2261 | ################################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
################################################################################
import torch.utils.data as data
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
NP_EXTENSIONS = ['.npy',]
def is_image_file(filename, mode='img'):
if(mode=='img'):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
elif(mode=='np'):
return any(filename.endswith(extension) for extension in NP_EXTENSIONS)
def make_dataset(dirs, mode='img'):
if(not isinstance(dirs,list)):
dirs = [dirs,]
images = []
for dir in dirs:
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname, mode=mode):
path = os.path.join(root, fname)
images.append(path)
# print("Found %i images in %s"%(len(images),root))
return images
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
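# Illustrative usage (added, not part of the original file); the directory
# path below is a placeholder and the block only runs if it exists.
if __name__ == '__main__':
    demo_dir = './images'
    if os.path.isdir(demo_dir):
        dataset = ImageFolder(demo_dir, return_paths=True)
        print('found %d images' % len(dataset))
        first_img, first_path = dataset[0]
        print(first_img.size, first_path)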
| apache-2.0 |
woobe/h2o | py/testdir_single_jvm/test_exec2_apply_phrases.py | 1 | 3062 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e, h2o_hosts, h2o_import as h2i
DO_COMPOUND = False
phrasesCompound = [
# use a dialetc with restricted grammar
# 1. all functions are on their own line
# 2. all functions only use data thru their params, or created in the function
# "a=1; a=2; function(x){x=a;a=3}",
# "a=r.hex; function(x){x=a;a=3;nrow(x)*a}(a)",
# "function(x){y=x*2; y+1}(2)",
# "mean2=function(x){apply(x,1,sum)/nrow(x)};mean2(r.hex)",
]
badPhrases = [
"&&",
"||",
"%*%",
"ifelse",
"cbind",
"print",
"apply",
"sapply",
"ddply",
"var",
"Reduce",
"cut",
"findInterval",
"runif",
"scale",
"t",
"seq_len",
"seq",
"rep_len",
"c",
"table",
"unique",
"factor",
]
phrases = [
"func1",
"func2",
"func3",
"func4",
"func5",
# "func6",
"nrow",
"ncol",
"length",
"is.factor",
"any.factor",
"any.na",
"isTRUE",
"min.na.rm",
"max.na.rm",
"min",
"max",
"xorsum",
]
if DO_COMPOUND:
phrases += phrasesCompound
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1, java_heap_GB=12)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_exec2_apply_phrases(self):
h2o.beta_features = True
bucket = 'home-0xdiag-datasets'
# csvPathname = 'standard/covtype.data'
csvPathname = "standard/covtype.shuffled.10pct.data"
hexKey = 'i.hex'
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='local', hex_key=hexKey)
for col in [1]:
initList = [
('r.hex', 'r.hex=i.hex'),
(None, "func1=function(x){max(x[,%s])}" % col),
(None, "func2=function(x){a=3;nrow(x[,%s])*a}" % col),
(None, "func3=function(x){apply(x[,%s],2,sum)/nrow(x[,%s])}" % (col, col) ),
# (None, "function(x) { cbind( mean(x[,1]), mean(x[,%s]) ) }" % col),
(None, "func4=function(x) { mean( x[,%s]) }" % col),
(None, "func5=function(x) { sd( x[,%s]) }" % col),
(None, "func6=function(x) { quantile(x[,%s] , c(0.9) ) }" % col),
]
for resultKey, execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=resultKey, timeoutSecs=60)
for p in phrases:
# execExpr = "apply(r.hex, c(2), " + p + ")"
execExpr = "apply(r.hex, 2, " + p + ")"
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey=None, timeoutSecs=60)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
mwrightevent38/MissionPlanner | Lib/site-packages/scipy/stats/tests/test_stats.py | 55 | 71392 | """ Test functions for stats module
WRITTEN BY LOUIS LUANGKESORN <[email protected]> FOR THE STATS MODULE
BASED ON WILKINSON'S STATISTICS QUIZ
http://www.stanford.edu/~clint/bench/wilk.txt
Additional tests by a host of SciPy developers.
"""
from numpy.testing import TestCase, rand, assert_, assert_equal, \
assert_almost_equal, assert_array_almost_equal, assert_array_equal, \
assert_approx_equal, assert_raises, run_module_suite
from numpy import array, arange, zeros, ravel, float32, float64, power
import numpy as np
import sys
import scipy.stats as stats
""" Numbers in docstrings begining with 'W' refer to the section numbers
and headings found in the STATISTICS QUIZ of Leland Wilkinson. These are
considered to be essential functionality. True testing and
evaluation of a statistics package requires use of the
NIST Statistical test data. See McCullough (1999) Assessing The Reliability
of Statistical Software for a test methodology and its
implementation in testing SAS, SPSS, and S-Plus
"""
## Datasets
## These data sets are from the nasty.dat sets used by Wilkinson
## for MISS, need to be able to represent missing values
## For completeness, I should write the relevant tests and count them as failures
## Somewhat acceptable, since this is still beta software. It would count as a
## good target for 1.0 status
X = array([1,2,3,4,5,6,7,8,9],float)
ZERO= array([0,0,0,0,0,0,0,0,0], float)
#MISS=array([.,.,.,.,.,.,.,.,.], float)
BIG=array([99999991,99999992,99999993,99999994,99999995,99999996,99999997,99999998,99999999],float)
LITTLE=array([0.99999991,0.99999992,0.99999993,0.99999994,0.99999995,0.99999996,0.99999997,0.99999998,0.99999999],float)
HUGE=array([1e+12,2e+12,3e+12,4e+12,5e+12,6e+12,7e+12,8e+12,9e+12],float)
TINY=array([1e-12,2e-12,3e-12,4e-12,5e-12,6e-12,7e-12,8e-12,9e-12],float)
ROUND=array([0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5,8.5],float)
X2 = X * X
X3 = X2 * X
X4 = X3 * X
X5 = X4 * X
X6 = X5 * X
X7 = X6 * X
X8 = X7 * X
X9 = X8 * X
class TestRound(TestCase):
""" W.II. ROUND
You should get the numbers 1 to 9. Many language compilers,
such as Turbo Pascal and Lattice C, fail this test (they round
numbers inconsistently). Needless to say, statistical packages
written in these languages may fail the test as well. You can
also check the following expressions:
Y = INT(2.6*7 -0.2) (Y should be 18)
Y = 2-INT(EXP(LOG(SQR(2)*SQR(2)))) (Y should be 0)
Y = INT(3-EXP(LOG(SQR(2)*SQR(2)))) (Y should be 1)
INT is the integer function. It converts decimal numbers to
integers by throwing away numbers after the decimal point. EXP
is exponential, LOG is logarithm, and SQR is square root. You may
have to substitute similar names for these functions for different
packages. Since the square of a square root should return the same
number, and the exponential of a log should return the same number,
we should get back a 2 from this function of functions. By taking
the integer result and subtracting from 2, we are exposing the
roundoff errors. These simple functions are at the heart of
statistical calculations.
"""
def test_rounding0(self):
""" W.II.A.0. Print ROUND with only one digit.
You should get the numbers 1 to 9. Many language compilers,
such as Turbo Pascal and Lattice C, fail this test (they round
numbers inconsistently). Needless to say, statistical packages
written in these languages may fail the test as well.
"""
if sys.version_info[0] >= 3:
# round to even
for i in range(0,9):
y = round(ROUND[i])
assert_equal(y, 2*((i+1)//2))
else:
for i in range(0,9):
y = round(ROUND[i])
assert_equal(y,i+1)
def test_rounding1(self):
""" W.II.A.1. Y = INT(2.6*7 -0.2) (Y should be 18)"""
y = int(2.6*7 -0.2)
assert_equal(y, 18)
def test_rounding2(self):
""" W.II.A.2. Y = 2-INT(EXP(LOG(SQR(2)*SQR(2)))) (Y should be 0)"""
y=2-int(np.exp(np.log(np.sqrt(2.)*np.sqrt(2.))))
assert_equal(y,0)
def test_rounding3(self):
""" W.II.A.3. Y = INT(3-EXP(LOG(SQR(2)*SQR(2)))) (Y should be 1)"""
y=(int(round((3-np.exp(np.log(np.sqrt(2.0)*np.sqrt(2.0)))))))
assert_equal(y,1)
class TestBasicStats(TestCase):
""" W.II.C. Compute basic statistic on all the variables.
The means should be the fifth value of all the variables (case FIVE).
The standard deviations should be "undefined" or missing for MISS,
0 for ZERO, and 2.738612788 (times 10 to a power) for all the other variables.
II. C. Basic Statistics
"""
dprec = np.finfo(np.float64).precision
# Really need to write these tests to handle missing values properly
def test_tmeanX(self):
y = stats.tmean(X, (2, 8), (True, True))
assert_approx_equal(y, 5.0, significant=TestBasicStats.dprec)
def test_tvarX(self):
y = stats.tvar(X, (2, 8), (True, True))
assert_approx_equal(y, 4.6666666666666661,
significant=TestBasicStats.dprec)
def test_tstdX(self):
y = stats.tstd(X, (2, 8), (True, True))
assert_approx_equal(y, 2.1602468994692865,
significant=TestBasicStats.dprec)
class TestNanFunc(TestCase):
def __init__(self, *args, **kw):
TestCase.__init__(self, *args, **kw)
self.X = X.copy()
self.Xall = X.copy()
self.Xall[:] = np.nan
self.Xsome = X.copy()
self.Xsomet = X.copy()
self.Xsome[0] = np.nan
self.Xsomet = self.Xsomet[1:]
def test_nanmean_none(self):
"""Check nanmean when no values are nan."""
m = stats.nanmean(X)
assert_approx_equal(m, X[4])
def test_nanmean_some(self):
"""Check nanmean when some values only are nan."""
m = stats.nanmean(self.Xsome)
assert_approx_equal(m, 5.5)
def test_nanmean_all(self):
"""Check nanmean when all values are nan."""
olderr = np.seterr(all='ignore')
try:
m = stats.nanmean(self.Xall)
finally:
np.seterr(**olderr)
assert_(np.isnan(m))
def test_nanstd_none(self):
"""Check nanstd when no values are nan."""
s = stats.nanstd(self.X)
assert_approx_equal(s, np.std(self.X, ddof=1))
def test_nanstd_some(self):
"""Check nanstd when some values only are nan."""
s = stats.nanstd(self.Xsome)
assert_approx_equal(s, np.std(self.Xsomet, ddof=1))
def test_nanstd_all(self):
"""Check nanstd when all values are nan."""
olderr = np.seterr(all='ignore')
try:
s = stats.nanstd(self.Xall)
finally:
np.seterr(**olderr)
assert_(np.isnan(s))
def test_nanstd_negative_axis(self):
x = np.array([1, 2, 3])
assert_equal(stats.nanstd(x, -1), 1)
def test_nanmedian_none(self):
"""Check nanmedian when no values are nan."""
m = stats.nanmedian(self.X)
assert_approx_equal(m, np.median(self.X))
def test_nanmedian_some(self):
"""Check nanmedian when some values only are nan."""
m = stats.nanmedian(self.Xsome)
assert_approx_equal(m, np.median(self.Xsomet))
def test_nanmedian_all(self):
"""Check nanmedian when all values are nan."""
m = stats.nanmedian(self.Xall)
assert_(np.isnan(m))
def test_nanmedian_scalars(self):
"""Check nanmedian for scalar inputs. See ticket #1098."""
assert_equal(stats.nanmedian(1), np.median(1))
assert_equal(stats.nanmedian(True), np.median(True))
assert_equal(stats.nanmedian(np.array(1)), np.median(np.array(1)))
assert_equal(stats.nanmedian(np.nan), np.median(np.nan))
class TestCorrPearsonr(TestCase):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_pXX(self):
y = stats.pearsonr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXBIG(self):
y = stats.pearsonr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXLITTLE(self):
y = stats.pearsonr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXHUGE(self):
y = stats.pearsonr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXTINY(self):
y = stats.pearsonr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pXROUND(self):
y = stats.pearsonr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGBIG(self):
y = stats.pearsonr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGLITTLE(self):
y = stats.pearsonr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGHUGE(self):
y = stats.pearsonr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGTINY(self):
y = stats.pearsonr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pBIGROUND(self):
y = stats.pearsonr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLELITTLE(self):
y = stats.pearsonr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEHUGE(self):
y = stats.pearsonr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLETINY(self):
y = stats.pearsonr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pLITTLEROUND(self):
y = stats.pearsonr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEHUGE(self):
y = stats.pearsonr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGETINY(self):
y = stats.pearsonr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pHUGEROUND(self):
y = stats.pearsonr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYTINY(self):
y = stats.pearsonr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_pTINYROUND(self):
y = stats.pearsonr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_pROUNDROUND(self):
y = stats.pearsonr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_r_exactly_pos1(self):
a = arange(3.0)
b = a
r, prob = stats.pearsonr(a,b)
assert_equal(r, 1.0)
assert_equal(prob, 0.0)
def test_r_exactly_neg1(self):
a = arange(3.0)
b = -a
r, prob = stats.pearsonr(a,b)
assert_equal(r, -1.0)
assert_equal(prob, 0.0)
def test_fisher_exact():
"""Some tests to show that fisher_exact() works correctly.
Testing the hypergeometric survival function against R's, showing that one
of them (probably Scipy's) is slightly defective (see the test with
significant=1). This is probably because, in distributions.py, Scipy
uses 1.0 - cdf as the sf instead of calculating the sf more directly
for improved numerical accuracy.
Also note that R and Scipy have different argument formats for their
hypergeometric distrib functions.
R:
> phyper(18999, 99000, 110000, 39000, lower.tail = FALSE)
[1] 1.701815e-09
"""
fisher_exact = stats.fisher_exact
res = fisher_exact([[18000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 0.2751, significant=4)
res = fisher_exact([[14500, 20000], [30000, 40000]])[1]
assert_approx_equal(res, 0.01106, significant=4)
res = fisher_exact([[100, 2], [1000, 5]])[1]
assert_approx_equal(res, 0.1301, significant=4)
res = fisher_exact([[2, 7], [8, 2]])[1]
assert_approx_equal(res, 0.0230141, significant=6)
res = fisher_exact([[5, 1], [10, 10]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 15], [20, 20]])[1]
assert_approx_equal(res, 0.0958044, significant=6)
res = fisher_exact([[5, 16], [20, 25]])[1]
assert_approx_equal(res, 0.1725862, significant=6)
res = fisher_exact([[10, 5], [10, 1]])[1]
assert_approx_equal(res, 0.1973244, significant=6)
res = fisher_exact([[5, 0], [1, 4]])[1]
assert_approx_equal(res, 0.04761904, significant=6)
res = fisher_exact([[0, 1], [3, 2]])[1]
assert_approx_equal(res, 1.0)
res = fisher_exact([[0, 2], [6, 4]])[1]
assert_approx_equal(res, 0.4545454545)
res = fisher_exact([[2, 7], [8, 2]])
assert_approx_equal(res[1], 0.0230141, significant=6)
assert_approx_equal(res[0], 4.0 / 56)
# High tolerance due to survival function inaccuracy.
res = fisher_exact([[19000, 80000], [20000, 90000]])[1]
assert_approx_equal(res, 3.319e-9, significant=1)
# results from R
#
# R defines oddsratio differently (see Notes section of fisher_exact
# docstring), so those will not match. We leave them in anyway, in
# case they will be useful later on. We test only the p-value.
tablist = [
([[100, 2], [1000, 5]], (2.505583993422285e-001, 1.300759363430016e-001)),
([[2, 7], [8, 2]], (8.586235135736206e-002, 2.301413756522114e-002)),
([[5, 1], [10, 10]], (4.725646047336584e+000, 1.973244147157190e-001)),
([[5, 15], [20, 20]], (3.394396617440852e-001, 9.580440012477637e-002)),
([[5, 16], [20, 25]], (3.960558326183334e-001, 1.725864953812994e-001)),
([[10, 5], [10, 1]], (2.116112781158483e-001, 1.973244147157190e-001)),
([[10, 5], [10, 0]], (0.000000000000000e+000, 6.126482213438734e-002)),
([[5, 0], [1, 4]], (np.inf, 4.761904761904762e-002)),
([[0, 5], [1, 4]], (0.000000000000000e+000, 1.000000000000000e+000)),
([[5, 1], [0, 4]], (np.inf, 4.761904761904758e-002)),
([[0, 1], [3, 2]], (0.000000000000000e+000, 1.000000000000000e+000))
]
for table, res_r in tablist:
res = fisher_exact(np.asarray(table))
np.testing.assert_almost_equal(res[1], res_r[1], decimal=11,
verbose=True)
# test we raise an error for wrong shape of input.
assert_raises(ValueError, fisher_exact, np.arange(6).reshape(2, 3))
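# Illustrative note (added, not part of the original tests): the R call quoted
# in the docstring corresponds to scipy's hypergeometric survival function
# with a population of 99000 + 110000 items, 99000 of them "successes", and
# 39000 draws, i.e. roughly
#     stats.hypergeom.sf(18999, 99000 + 110000, 99000, 39000)
# which is the quantity whose accuracy the significant=1 test above probes.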
class TestCorrSpearmanr(TestCase):
""" W.II.D. Compute a correlation matrix on all the variables.
All the correlations, except for ZERO and MISS, should be exactly 1.
ZERO and MISS should have undefined or missing correlations with the
other variables. The same should go for SPEARMAN correlations, if
your program has them.
"""
def test_sXX(self):
y = stats.spearmanr(X,X)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXBIG(self):
y = stats.spearmanr(X,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXLITTLE(self):
y = stats.spearmanr(X,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXHUGE(self):
y = stats.spearmanr(X,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXTINY(self):
y = stats.spearmanr(X,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sXROUND(self):
y = stats.spearmanr(X,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGBIG(self):
y = stats.spearmanr(BIG,BIG)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGLITTLE(self):
y = stats.spearmanr(BIG,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGHUGE(self):
y = stats.spearmanr(BIG,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGTINY(self):
y = stats.spearmanr(BIG,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sBIGROUND(self):
y = stats.spearmanr(BIG,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLELITTLE(self):
y = stats.spearmanr(LITTLE,LITTLE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEHUGE(self):
y = stats.spearmanr(LITTLE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLETINY(self):
y = stats.spearmanr(LITTLE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sLITTLEROUND(self):
y = stats.spearmanr(LITTLE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEHUGE(self):
y = stats.spearmanr(HUGE,HUGE)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGETINY(self):
y = stats.spearmanr(HUGE,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sHUGEROUND(self):
y = stats.spearmanr(HUGE,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYTINY(self):
y = stats.spearmanr(TINY,TINY)
r = y[0]
assert_approx_equal(r,1.0)
def test_sTINYROUND(self):
y = stats.spearmanr(TINY,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
def test_sROUNDROUND(self):
y = stats.spearmanr(ROUND,ROUND)
r = y[0]
assert_approx_equal(r,1.0)
class TestCorrSpearmanrTies(TestCase):
"""Some tests of tie-handling by the spearmanr function."""
def test_tie1(self):
# Data
x = [1.0, 2.0, 3.0, 4.0]
y = [1.0, 2.0, 2.0, 3.0]
# Ranks of the data, with tie-handling.
xr = [1.0, 2.0, 3.0, 4.0]
yr = [1.0, 2.5, 2.5, 4.0]
# Result of spearmanr should be the same as applying
# pearsonr to the ranks.
sr = stats.spearmanr(x, y)
pr = stats.pearsonr(xr, yr)
assert_almost_equal(sr, pr)
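# Illustrative note (added, not part of the original tests): the tie-handled
# ranks written out by hand above can be produced with scipy.stats.rankdata,
# which assigns tied values the average of their ranks:
#     >>> stats.rankdata([1.0, 2.0, 2.0, 3.0])
#     array([ 1. ,  2.5,  2.5,  4. ])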
## W.II.E. Tabulate X against X, using BIG as a case weight. The values
## should appear on the diagonal and the total should be 899999955.
## If the table cannot hold these values, forget about working with
## census data. You can also tabulate HUGE against TINY. There is no
## reason a tabulation program should not be able to distinguish
## different values regardless of their magnitude.
### I need to figure out how to do this one.
def test_kendalltau():
"""Some tests for kendalltau."""
# with some ties
x1 = [12, 2, 1, 12, 2]
x2 = [1, 4, 7, 1, 0]
res = (-0.47140452079103173, 0.24821309157521476)
expected = stats.kendalltau(x1, x2)
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# check two different sort methods
assert_approx_equal(stats.kendalltau(x1, x2, initial_lexsort=False)[1],
stats.kendalltau(x1, x2, initial_lexsort=True)[1])
# and with larger arrays
np.random.seed(7546)
x = np.array([np.random.normal(loc=1, scale=1, size=500),
np.random.normal(loc=1, scale=1, size=500)])
corr = [[1.0, 0.3],
[0.3, 1.0]]
x = np.dot(np.linalg.cholesky(corr), x)
expected = (0.19291382765531062, 1.1337108207276285e-10)
res = stats.kendalltau(x[0], x[1])
assert_approx_equal(res[0], expected[0])
assert_approx_equal(res[1], expected[1])
# and do we get a tau of 1 for identical inputs?
assert_approx_equal(stats.kendalltau([1,1,2], [1,1,2])[0], 1.0)
class TestRegression(TestCase):
def test_linregressBIGX(self):
""" W.II.F. Regress BIG on X.
The constant should be 99999990 and the regression coefficient should be 1.
"""
y = stats.linregress(X,BIG)
intercept = y[1]
r=y[2]
assert_almost_equal(intercept,99999990)
assert_almost_equal(r,1.0)
## W.IV.A. Take the NASTY dataset above. Use the variable X as a
## basis for computing polynomials. Namely, compute X1=X, X2=X*X,
## X3=X*X*X, and so on up to 9 products. Use the algebraic
## transformation language within the statistical package itself. You
## will end up with 9 variables. Now regress X1 on X2-X9 (a perfect
## fit). If the package balks (singular or roundoff error messages),
## try X1 on X2-X8, and so on. Most packages cannot handle more than
## a few polynomials.
## Scipy's stats.py does not seem to handle multiple linear regression
## The datasets X1 . . X9 are at the top of the file.
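    # A hedged sketch (not collected by the test runner) of how the
    # polynomial regression described above could be attempted, since
    # stats.linregress only handles a single predictor. It assumes the
    # module-level columns X1 .. X9 mentioned in the comment; an ordinary
    # least-squares fit via np.linalg.lstsq stands in for a dedicated
    # multiple-regression routine.
    def _sketch_poly_regression(self):
        import numpy as np
        predictors = np.column_stack([X2, X3, X4, X5, X6, X7, X8, X9])
        design = np.column_stack([np.ones(len(X1)), predictors])
        coef, residuals, rank, sv = np.linalg.lstsq(design, X1)
        # a reported rank smaller than design.shape[1] is the singularity /
        # roundoff trouble the comment above warns about
        return coef, rank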
def test_regressXX(self):
""" W.IV.B. Regress X on X.
The constant should be exactly 0 and the regression coefficient should be 1.
This is a perfectly valid regression. The program should not complain.
"""
y = stats.linregress(X,X)
intercept = y[1]
r=y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,1.0)
## W.IV.C. Regress X on BIG and LITTLE (two predictors). The program
## should tell you that this model is "singular" because BIG and
## LITTLE are linear combinations of each other. Cryptic error
## messages are unacceptable here. Singularity is the most
## fundamental regression error.
### Need to figure out how to handle multiple linear regression. Not obvious
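    # A similar hedged sketch for the singular two-predictor case (not
    # collected by the test runner). Assuming the module-level X, BIG and
    # LITTLE from the NASTY data, the design matrix [1, BIG, LITTLE] is
    # numerically rank deficient because BIG and LITTLE are both linear
    # transformations of X; np.linalg.lstsq reports this through its rank
    # and singular values instead of failing with a cryptic message.
    def _sketch_singular_regression(self):
        import numpy as np
        design = np.column_stack([np.ones(len(X)), BIG, LITTLE])
        coef, residuals, rank, sv = np.linalg.lstsq(design, X)
        # a rank below 3, or a smallest singular value near zero, is the
        # "singular" condition described above
        return rank, sv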
def test_regressZEROX(self):
""" W.IV.D. Regress ZERO on X.
The program should inform you that ZERO has no variance or it should
go ahead and compute the regression and report a correlation and
total sum of squares of exactly 0.
"""
y = stats.linregress(X,ZERO)
intercept = y[1]
r=y[2]
assert_almost_equal(intercept,0.0)
assert_almost_equal(r,0.0)
def test_regress_simple(self):
"""Regress a line with sinusoidal noise."""
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
res = stats.linregress(x, y)
assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3)
def test_regress_simple_onearg_rows(self):
"""Regress a line with sinusoidal noise, with a single input of shape
(2, N).
"""
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
rows = np.vstack((x, y))
res = stats.linregress(rows)
assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3)
def test_regress_simple_onearg_cols(self):
"""Regress a line with sinusoidal noise, with a single input of shape
(N, 2).
"""
x = np.linspace(0, 100, 100)
y = 0.2 * np.linspace(0, 100, 100) + 10
y += np.sin(np.linspace(0, 20, 100))
cols = np.hstack((np.expand_dims(x, 1), np.expand_dims(y, 1)))
res = stats.linregress(cols)
assert_almost_equal(res[4], 2.3957814497838803e-3) #4.3609875083149268e-3)
def test_regress_shape_error(self):
"""Check that a single input argument to linregress with wrong shape
results in a ValueError."""
assert_raises(ValueError, stats.linregress, np.ones((3, 3)))
def test_linregress(self):
'''compared with multivariate ols with pinv'''
x = np.arange(11)
y = np.arange(5,16)
        y[[1, -2]] -= 1
        y[[0, -1]] += 1
res = (1.0, 5.0, 0.98229948625750, 7.45259691e-008, 0.063564172616372733)
assert_array_almost_equal(stats.linregress(x,y),res,decimal=14)
class TestHistogram(TestCase):
""" Tests that histogram works as it should, and keeps old behaviour
"""
# what is untested:
# - multidimensional arrays (since 'a' is ravel'd as the first line in the method)
# - very large arrays
# - Nans, Infs, empty and otherwise bad inputs
# sample arrays to test the histogram with
low_values = np.array([0.2, 0.3, 0.4, 0.5, 0.5, 0.6, 0.7, 0.8, 0.9, 1.1, 1.2],
dtype=float) # 11 values
high_range = np.array([2, 3, 4, 2, 21, 32, 78, 95, 65, 66, 66, 66, 66, 4],
dtype=float) # 14 values
low_range = np.array([2, 3, 3, 2, 3, 2.4, 2.1, 3.1, 2.9, 2.6, 2.7, 2.8, 2.2, 2.001],
dtype=float) # 14 values
few_values = np.array([2.0, 3.0, -1.0, 0.0], dtype=float) # 4 values
def test_simple(self):
""" Tests that each of the tests works as expected with default params
"""
# basic tests, with expected results (no weighting)
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([ 1., 1., 1., 2., 2.,
1., 1., 0., 1., 1.]),
0.14444444444444446, 0.11111111111111112, 0)),
(self.high_range, (np.array([ 5., 0., 1., 1., 0.,
0., 5., 1., 0., 1.]),
-3.1666666666666661, 10.333333333333332, 0)),
(self.low_range, (np.array([ 3., 1., 1., 1., 0., 1.,
1., 2., 3., 1.]),
1.9388888888888889, 0.12222222222222223, 0)),
(self.few_values, (np.array([ 1., 0., 1., 0., 0., 0.,
0., 1., 0., 1.]),
-1.2222222222222223, 0.44444444444444448, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_weighting(self):
""" Tests that weights give expected histograms
"""
# basic tests, with expected results, given a set of weights
# weights used (first n are used for each test, where n is len of array) (14 values)
weights = np.array([1., 3., 4.5, 0.1, -1.0, 0.0, 0.3, 7.0, 103.2, 2, 40, 0, 0, 1])
# results taken from the numpy version of histogram
basic_tests = ((self.low_values, (np.array([ 4.0, 0.0, 4.5, -0.9, 0.0,
0.3,110.2, 0.0, 0.0, 42.0]),
0.2, 0.1, 0)),
(self.high_range, (np.array([ 9.6, 0. , -1. , 0. , 0. ,
0. ,145.2, 0. , 0.3, 7. ]),
2.0, 9.3, 0)),
(self.low_range, (np.array([ 2.4, 0. , 0. , 0. , 0. ,
2. , 40. , 0. , 103.2, 13.5]),
2.0, 0.11, 0)),
(self.few_values, (np.array([ 4.5, 0. , 0.1, 0. , 0. , 0. ,
0. , 1. , 0. , 3. ]),
-1., 0.4, 0)),
)
for inputs, expected_results in basic_tests:
# use the first lot of weights for test
# default limits given to reproduce output of numpy's test better
given_results = stats.histogram(inputs, defaultlimits=(inputs.min(),
inputs.max()),
weights=weights[:len(inputs)])
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_reduced_bins(self):
""" Tests that reducing the number of bins produces expected results
"""
# basic tests, with expected results (no weighting),
# except number of bins is halved to 5
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([ 2., 3., 3., 1., 2.]),
0.075000000000000011, 0.25, 0)),
(self.high_range, (np.array([ 5., 2., 0., 6., 1.]),
-9.625, 23.25, 0)),
(self.low_range, (np.array([ 4., 2., 1., 3., 4.]),
1.8625, 0.27500000000000002, 0)),
(self.few_values, (np.array([ 1., 1., 0., 1., 1.]),
-1.5, 1.0, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs, numbins=5)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_increased_bins(self):
""" Tests that increasing the number of bins produces expected results
"""
# basic tests, with expected results (no weighting),
# except number of bins is double to 20
# results taken from the previous (slower) version of histogram
basic_tests = ((self.low_values, (np.array([ 1., 0., 1., 0., 1.,
0., 2., 0., 1., 0.,
1., 1., 0., 1., 0.,
0., 0., 1., 0., 1.]),
0.1736842105263158, 0.052631578947368418, 0)),
(self.high_range, (np.array([ 5., 0., 0., 0., 1.,
0., 1., 0., 0., 0.,
0., 0., 0., 5., 0.,
0., 1., 0., 0., 1.]),
-0.44736842105263142, 4.8947368421052628, 0)),
(self.low_range, (np.array([ 3., 0., 1., 1., 0., 0.,
0., 1., 0., 0., 1., 0.,
1., 0., 1., 0., 1., 3.,
0., 1.]),
1.9710526315789474, 0.057894736842105263, 0)),
(self.few_values, (np.array([ 1., 0., 0., 0., 0., 1.,
0., 0., 0., 0., 0., 0.,
0., 0., 1., 0., 0., 0.,
0., 1.]),
-1.1052631578947367, 0.21052631578947367, 0)),
)
for inputs, expected_results in basic_tests:
given_results = stats.histogram(inputs, numbins=20)
assert_array_almost_equal(expected_results[0], given_results[0],
decimal=2)
for i in range(1, 4):
assert_almost_equal(expected_results[i], given_results[i],
decimal=2)
def test_cumfreq():
x = [1, 4, 2, 1, 3, 1]
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4)
assert_array_almost_equal(cumfreqs, np.array([ 3., 4., 5., 6.]))
cumfreqs, lowlim, binsize, extrapoints = stats.cumfreq(x, numbins=4,
defaultreallimits=(1.5, 5))
assert_(extrapoints==3)
def test_relfreq():
a = np.array([1, 4, 2, 1, 3, 1])
relfreqs, lowlim, binsize, extrapoints = stats.relfreq(a, numbins=4)
assert_array_almost_equal(relfreqs, array([0.5, 0.16666667, 0.16666667, 0.16666667]))
# check array_like input is accepted
relfreqs2, lowlim, binsize, extrapoints = stats.relfreq([1, 4, 2, 1, 3, 1], numbins=4)
assert_array_almost_equal(relfreqs, relfreqs2)
# Utility
def compare_results(res,desired):
for i in range(len(desired)):
assert_array_equal(res[i],desired[i])
##################################################
### Test for sum
class TestGMean(TestCase):
def test_1D_list(self):
a = (1,2,3,4)
actual= stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired,decimal=14)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float32)
actual= stats.gmean(a)
desired = power(1*2*3*4,1./4.)
assert_almost_equal(actual, desired, decimal=7)
desired1 = stats.gmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=7)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual= stats.gmean(a)
desired = array((1,2,3,4))
assert_array_almost_equal(actual, desired, decimal=14)
desired1 = stats.gmean(a,axis=0)
assert_array_almost_equal(actual, desired1, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual= stats.gmean(a, axis=1)
v = power(1*2*3*4,1./4.)
desired = array((v,v,v))
assert_array_almost_equal(actual, desired, decimal=14)
def test_large_values(self):
a = array([1e100, 1e200, 1e300])
actual = stats.gmean(a)
assert_approx_equal(actual, 1e200, significant=14)
class TestHMean(TestCase):
def test_1D_list(self):
a = (1,2,3,4)
actual= stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(array(a),axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_1D_array(self):
a = array((1,2,3,4), float64)
actual= stats.hmean(a)
desired = 4. / (1./1 + 1./2 + 1./3 + 1./4)
assert_almost_equal(actual, desired, decimal=14)
desired1 = stats.hmean(a,axis=-1)
assert_almost_equal(actual, desired1, decimal=14)
def test_2D_array_default(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
actual = stats.hmean(a)
desired = array((1.,2.,3.,4.))
assert_array_almost_equal(actual, desired, decimal=14)
actual1 = stats.hmean(a,axis=0)
assert_array_almost_equal(actual1, desired, decimal=14)
def test_2D_array_dim1(self):
a = array(((1,2,3,4),
(1,2,3,4),
(1,2,3,4)))
v = 4. / (1./1 + 1./2 + 1./3 + 1./4)
desired1 = array((v,v,v))
actual1 = stats.hmean(a, axis=1)
assert_array_almost_equal(actual1, desired1, decimal=14)
class TestPercentile(TestCase):
def setUp(self):
self.a1 = [3,4,5,10,-3,-5,6]
self.a2 = [3,-6,-2,8,7,4,2,1]
self.a3 = [3.,4,5,10,-3,-5,-6,7.0]
def test_percentile(self):
x = arange(8) * 0.5
assert_equal(stats.scoreatpercentile(x, 0), 0.)
assert_equal(stats.scoreatpercentile(x, 100), 3.5)
assert_equal(stats.scoreatpercentile(x, 50), 1.75)
def test_2D(self):
x = array([[1, 1, 1],
[1, 1, 1],
[4, 4, 3],
[1, 1, 1],
[1, 1, 1]])
assert_array_equal(stats.scoreatpercentile(x,50),
[1,1,1])
class TestCMedian(TestCase):
def test_basic(self):
data = [1,2,3,1,5,3,6,4,3,2,4,3,5,2.0]
assert_almost_equal(stats.cmedian(data,5),3.2916666666666665)
assert_almost_equal(stats.cmedian(data,3),3.083333333333333)
assert_almost_equal(stats.cmedian(data),3.0020020020020022)
class TestMode(TestCase):
def test_basic(self):
data1 = [3,5,1,10,23,3,2,6,8,6,10,6]
vals = stats.mode(data1)
assert_almost_equal(vals[0][0],6)
assert_almost_equal(vals[1][0],3)
class TestVariability(TestCase):
""" Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
"""
testcase = [1,2,3,4]
def test_signaltonoise(self):
"""
this is not in R, so used
mean(testcase,axis=0)/(sqrt(var(testcase)*3/4)) """
#y = stats.signaltonoise(self.shoes[0])
#assert_approx_equal(y,4.5709967)
y = stats.signaltonoise(self.testcase)
assert_approx_equal(y,2.236067977)
def test_sem(self):
"""
this is not in R, so used
sqrt(var(testcase)*3/4)/sqrt(3)
"""
#y = stats.sem(self.shoes[0])
#assert_approx_equal(y,0.775177399)
y = stats.sem(self.testcase)
assert_approx_equal(y,0.6454972244)
def test_zmap(self):
"""
not in R, so tested by using
(testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4)
"""
y = stats.zmap(self.testcase,self.testcase)
desired = ([-1.3416407864999, -0.44721359549996 , 0.44721359549996 , 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
def test_zscore(self):
"""
not in R, so tested by using
(testcase[i]-mean(testcase,axis=0))/sqrt(var(testcase)*3/4)
"""
y = stats.zscore(self.testcase)
desired = ([-1.3416407864999, -0.44721359549996 , 0.44721359549996 , 1.3416407864999])
assert_array_almost_equal(desired,y,decimal=12)
class TestMoments(TestCase):
"""
Comparison numbers are found using R v.1.5.1
note that length(testcase) = 4
testmathworks comes from documentation for the
Statistics Toolbox for Matlab and can be found at both
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/kurtosis.shtml
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/skewness.shtml
Note that both test cases came from here.
"""
testcase = [1,2,3,4]
testmathworks = [1.165 , 0.6268, 0.0751, 0.3516, -0.6965]
def test_moment(self):
"""
        mean((testcase - mean(testcase, axis=0))**power, axis=0)"""
y = stats.moment(self.testcase,1)
assert_approx_equal(y,0.0,10)
y = stats.moment(self.testcase,2)
assert_approx_equal(y,1.25)
y = stats.moment(self.testcase,3)
assert_approx_equal(y,0.0)
y = stats.moment(self.testcase,4)
assert_approx_equal(y,2.5625)
def test_variation(self):
"""
variation = samplestd/mean """
## y = stats.variation(self.shoes[0])
## assert_approx_equal(y,21.8770668)
y = stats.variation(self.testcase)
assert_approx_equal(y,0.44721359549996, 10)
def test_skewness(self):
"""
sum((testmathworks-mean(testmathworks,axis=0))**3,axis=0)/
((sqrt(var(testmathworks)*4/5))**3)/5
"""
y = stats.skew(self.testmathworks)
assert_approx_equal(y,-0.29322304336607,10)
y = stats.skew(self.testmathworks,bias=0)
assert_approx_equal(y,-0.437111105023940,10)
y = stats.skew(self.testcase)
assert_approx_equal(y,0.0,10)
def test_skewness_scalar(self):
"""
`skew` must return a scalar for 1-dim input
"""
assert_equal(stats.skew(arange(10)), 0.0)
def test_kurtosis(self):
"""
sum((testcase-mean(testcase,axis=0))**4,axis=0)/((sqrt(var(testcase)*3/4))**4)/4
sum((test2-mean(testmathworks,axis=0))**4,axis=0)/((sqrt(var(testmathworks)*4/5))**4)/5
Set flags for axis = 0 and
        fisher=0 (Pearson's defn of kurtosis for compatibility with Matlab)
"""
y = stats.kurtosis(self.testmathworks,0,fisher=0,bias=1)
assert_approx_equal(y, 2.1658856802973,10)
# Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's kurtosis
        # kurtosis(x) gives a biased estimate of Fisher's kurtosis (Pearson - 3)
# The MATLAB docs imply that both should give Fisher's
y = stats.kurtosis(self.testmathworks,fisher=0,bias=0)
assert_approx_equal(y, 3.663542721189047,10)
y = stats.kurtosis(self.testcase,0,0)
assert_approx_equal(y,1.64)
def test_kurtosis_array_scalar(self):
assert_equal(type(stats.kurtosis([1,2,3])), float)
class TestThreshold(TestCase):
def test_basic(self):
a = [-1,2,3,4,5,-1,-2]
assert_array_equal(stats.threshold(a),a)
assert_array_equal(stats.threshold(a,3,None,0),
[0,0,3,4,5,0,0])
assert_array_equal(stats.threshold(a,None,3,0),
[-1,2,3,0,0,-1,-2])
assert_array_equal(stats.threshold(a,2,4,0),
[0,2,3,4,0,0,0])
# Hypothesis test tests
class TestStudentTest(TestCase):
X1 = np.array([-1, 0, 1])
X2 = np.array([0, 1, 2])
T1_0 = 0
P1_0 = 1
T1_1 = -1.732051
P1_1 = 0.2254033
T1_2 = -3.464102
P1_2 = 0.0741799
T2_0 = 1.732051
P2_0 = 0.2254033
def test_onesample(self):
t, p = stats.ttest_1samp(self.X1, 0)
assert_array_almost_equal(t, self.T1_0)
assert_array_almost_equal(p, self.P1_0)
t, p = stats.ttest_1samp(self.X2, 0)
assert_array_almost_equal(t, self.T2_0)
assert_array_almost_equal(p, self.P2_0)
t, p = stats.ttest_1samp(self.X1, 1)
assert_array_almost_equal(t, self.T1_1)
assert_array_almost_equal(p, self.P1_1)
t, p = stats.ttest_1samp(self.X1, 2)
assert_array_almost_equal(t, self.T1_2)
assert_array_almost_equal(p, self.P1_2)
def test_scoreatpercentile():
assert_equal(stats.scoreatpercentile(range(10), 50), 4.5)
assert_equal(stats.scoreatpercentile(range(10), 50, (2,7)), 4.5)
assert_equal(stats.scoreatpercentile(range(100), 50, (1,8)), 4.5)
assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),
50, (10,100)),
55)
assert_equal(stats.scoreatpercentile(np.array([1, 10 ,100]),
50, (1,10)),
5.5)
def test_percentileofscore():
pcos = stats.percentileofscore
assert_equal(pcos([1,2,3,4,5,6,7,8,9,10],4), 40.0)
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
yield assert_equal, pcos(np.arange(10) + 1,
4, kind=kind), \
result
# multiple - 2
for (kind, result) in [('rank', 45.0),
('strict', 30.0),
('weak', 50.0),
('mean', 40.0)]:
yield assert_equal, pcos([1,2,3,4,4,5,6,7,8,9],
4, kind=kind), \
result
# multiple - 3
assert_equal(pcos([1,2,3,4,4,4,5,6,7,8], 4), 50.0)
for (kind, result) in [('rank', 50.0),
('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
yield assert_equal, pcos([1,2,3,4,4,4,5,6,7,8],
4, kind=kind), \
result
# missing
for kind in ('rank', 'mean', 'strict', 'weak'):
yield assert_equal, pcos([1,2,3,5,6,7,8,9,10,11],
4, kind=kind), \
30
#larger numbers
for (kind, result) in [('mean', 35.0),
('strict', 30.0),
('weak', 40.0)]:
yield assert_equal, \
pcos([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 40,
kind=kind), result
for (kind, result) in [('mean', 45.0),
('strict', 30.0),
('weak', 60.0)]:
yield assert_equal, \
pcos([10, 20, 30, 40, 40, 40, 50, 60, 70, 80],
40, kind=kind), result
for kind in ('rank', 'mean', 'strict', 'weak'):
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
40, kind=kind), 30.0
#boundaries
for (kind, result) in [('rank', 10.0),
('mean', 5.0),
('strict', 0.0),
('weak', 10.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
10, kind=kind), result
for (kind, result) in [('rank', 100.0),
('mean', 95.0),
('strict', 90.0),
('weak', 100.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
110, kind=kind), result
#out of bounds
for (kind, score, result) in [('rank', 200, 100.0),
('mean', 200, 100.0),
('mean', 0, 0.0)]:
yield assert_equal, \
pcos([10, 20, 30, 50, 60, 70, 80, 90, 100, 110],
score, kind=kind), result
def test_friedmanchisquare():
# see ticket:113
# verified with matlab and R
#From Demsar "Statistical Comparisons of Classifiers over Multiple Data Sets"
#2006, Xf=9.28 (no tie handling, tie corrected Xf >=9.28)
x1 = [array([0.763, 0.599, 0.954, 0.628, 0.882, 0.936, 0.661, 0.583,
0.775, 1.0, 0.94, 0.619, 0.972, 0.957]),
array([0.768, 0.591, 0.971, 0.661, 0.888, 0.931, 0.668, 0.583,
0.838, 1.0, 0.962, 0.666, 0.981, 0.978]),
array([0.771, 0.590, 0.968, 0.654, 0.886, 0.916, 0.609, 0.563,
0.866, 1.0, 0.965, 0.614, 0.9751, 0.946]),
array([0.798, 0.569, 0.967, 0.657, 0.898, 0.931, 0.685, 0.625,
0.875, 1.0, 0.962, 0.669, 0.975, 0.970])]
#From "Bioestadistica para las ciencias de la salud" Xf=18.95 p<0.001:
x2 = [array([4,3,5,3,5,3,2,5,4,4,4,3]),
array([2,2,1,2,3,1,2,3,2,1,1,3]),
array([2,4,3,3,4,3,3,4,4,1,2,1]),
array([3,5,4,3,4,4,3,3,3,4,4,4])]
    #From Jerrold H. Zar, "Biostatistical Analysis" (example 12.6), Xf=10.68, 0.005 < p < 0.01:
    #The probability in this example is inexact when using the chi-square approximation of the Friedman chi-square statistic.
x3 = [array([7.0,9.9,8.5,5.1,10.3]),
array([5.3,5.7,4.7,3.5,7.7]),
array([4.9,7.6,5.5,2.8,8.4]),
array([8.8,8.9,8.1,3.3,9.1])]
assert_array_almost_equal(stats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),(10.2283464566929, 0.0167215803284414))
assert_array_almost_equal(stats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),(10.68, 0.0135882729582176))
np.testing.assert_raises(ValueError, stats.friedmanchisquare,x3[0],x3[1])
# test using mstats
assert_array_almost_equal(stats.mstats.friedmanchisquare(x1[0],x1[1],x1[2],x1[3]),(10.2283464566929, 0.0167215803284414))
# the following fails
#assert_array_almost_equal(stats.mstats.friedmanchisquare(x2[0],x2[1],x2[2],x2[3]),(18.9428571428571, 0.000280938375189499))
assert_array_almost_equal(stats.mstats.friedmanchisquare(x3[0],x3[1],x3[2],x3[3]),(10.68, 0.0135882729582176))
np.testing.assert_raises(ValueError,stats.mstats.friedmanchisquare,x3[0],x3[1])
def test_kstest():
#from numpy.testing import assert_almost_equal
# comparing with values from R
x = np.linspace(-1,1,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal( D, 0.15865525393145705, 12)
assert_almost_equal( p, 0.95164069201518386, 1)
x = np.linspace(-15,15,9)
D,p = stats.kstest(x,'norm')
assert_almost_equal( D, 0.44435602715924361, 15)
assert_almost_equal( p, 0.038850140086788665, 8)
# the following tests rely on deterministicaly replicated rvs
np.random.seed(987654321)
x = stats.norm.rvs(loc=0.2, size=100)
D,p = stats.kstest(x, 'norm', mode='asymp')
assert_almost_equal( D, 0.12464329735846891, 15)
assert_almost_equal( p, 0.089444888711820769, 15)
assert_almost_equal( np.array(stats.kstest(x, 'norm', mode='asymp')),
np.array((0.12464329735846891, 0.089444888711820769)), 15)
assert_almost_equal( np.array(stats.kstest(x,'norm', alternative = 'less')),
np.array((0.12464329735846891, 0.040989164077641749)), 15)
# this 'greater' test fails with precision of decimal=14
assert_almost_equal( np.array(stats.kstest(x,'norm', alternative = 'greater')),
np.array((0.0072115233216310994, 0.98531158590396228)), 12)
#missing: no test that uses *args
def test_ks_2samp():
#exact small sample solution
data1 = np.array([1.0,2.0])
data2 = np.array([1.0,2.0,3.0])
assert_almost_equal(np.array(stats.ks_2samp(data1+0.01,data2)),
np.array((0.33333333333333337, 0.99062316386915694)))
assert_almost_equal(np.array(stats.ks_2samp(data1-0.01,data2)),
np.array((0.66666666666666674, 0.42490954988801982)))
#these can also be verified graphically
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2+0.1)),
np.array((0.030000000000000027, 0.99999999996005062)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,100)+2-0.1)),
np.array((0.020000000000000018, 0.99999999999999933)))
#these are just regression tests
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20.1)),
np.array((0.21090909090909091, 0.015880386730710221)))
assert_almost_equal(
np.array(stats.ks_2samp(np.linspace(1,100,100),
np.linspace(1,100,110)+20-0.1)),
np.array((0.20818181818181825, 0.017981441789762638)))
def test_ttest_rel():
#regression test
tr,pr = 0.81248591389165692, 0.41846234511362157
tpr = ([tr,-tr],[pr,pr])
rvs1 = np.linspace(1,100,100)
rvs2 = np.linspace(1.01,99.989,100)
rvs1_2D = np.array([np.linspace(1,100,100), np.linspace(1.01,99.989,100)])
rvs2_2D = np.array([np.linspace(1.01,99.989,100), np.linspace(1,100,100)])
t,p = stats.ttest_rel(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_rel(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_rel(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
#test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_rel(rvs1_3D, rvs2_3D, axis=1)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_rel(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), tr)
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
olderr = np.seterr(all='ignore')
try:
#test zero division problem
t,p = stats.ttest_rel([0,0,0],[1,1,1])
assert_equal((np.abs(t),p), (np.inf, 0))
assert_almost_equal(stats.ttest_rel([0,0,0], [0,0,0]), (1.0, 0.42264973081037421))
#check that nan in input array result in nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
def test_ttest_ind():
#regression test
tr = 1.0912746897927283
pr = 0.27647818616351882
tpr = ([tr,-tr],[pr,pr])
rvs2 = np.linspace(1,100,100)
rvs1 = np.linspace(5,105,100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
t,p = stats.ttest_ind(rvs1, rvs2, axis=0)
assert_array_almost_equal([t,p],(tr,pr))
t,p = stats.ttest_ind(rvs1_2D.T, rvs2_2D.T, axis=0)
assert_array_almost_equal([t,p],tpr)
t,p = stats.ttest_ind(rvs1_2D, rvs2_2D, axis=1)
assert_array_almost_equal([t,p],tpr)
#test on 3 dimensions
rvs1_3D = np.dstack([rvs1_2D,rvs1_2D,rvs1_2D])
rvs2_3D = np.dstack([rvs2_2D,rvs2_2D,rvs2_2D])
t,p = stats.ttest_ind(rvs1_3D, rvs2_3D, axis=1)
assert_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (2, 3))
t,p = stats.ttest_ind(np.rollaxis(rvs1_3D,2), np.rollaxis(rvs2_3D,2), axis=2)
assert_array_almost_equal(np.abs(t), np.abs(tr))
assert_array_almost_equal(np.abs(p), pr)
assert_equal(t.shape, (3, 2))
olderr = np.seterr(all='ignore')
try:
#test zero division problem
t,p = stats.ttest_ind([0,0,0],[1,1,1])
assert_equal((np.abs(t),p), (np.inf, 0))
assert_almost_equal(stats.ttest_ind([0,0,0], [0,0,0]), (1.0, 0.37390096630005898))
#check that nan in input array result in nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_ind(anan, np.zeros((2,2))),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
def test_ttest_1samp_new():
n1, n2, n3 = (10,15,20)
rvn1 = stats.norm.rvs(loc=5,scale=10,size=(n1,n2,n3))
#check multidimensional array and correct axis handling
#deterministic rvn1 and rvn2 would be better as in test_ttest_rel
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n2,n3)),axis=0)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=0)
t3,p3 = stats.ttest_1samp(rvn1[:,0,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n2,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n3)),axis=1)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=1)
t3,p3 = stats.ttest_1samp(rvn1[0,:,0], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n3))
t1,p1 = stats.ttest_1samp(rvn1[:,:,:], np.ones((n1,n2)),axis=2)
t2,p2 = stats.ttest_1samp(rvn1[:,:,:], 1,axis=2)
t3,p3 = stats.ttest_1samp(rvn1[0,0,:], 1)
assert_array_almost_equal(t1,t2, decimal=14)
assert_almost_equal(t1[0,0],t3, decimal=14)
assert_equal(t1.shape, (n1,n2))
olderr = np.seterr(all='ignore')
try:
#test zero division problem
t,p = stats.ttest_1samp([0,0,0], 1)
assert_equal((np.abs(t),p), (np.inf, 0))
assert_almost_equal(stats.ttest_1samp([0,0,0], 0), (1.0, 0.42264973081037421))
#check that nan in input array result in nan output
anan = np.array([[1,np.nan],[-1,1]])
assert_equal(stats.ttest_1samp(anan, 0),([0, np.nan], [1,np.nan]))
finally:
np.seterr(**olderr)
def test_describe():
x = np.vstack((np.ones((3,4)),2*np.ones((2,4))))
nc, mmc = (5, ([ 1., 1., 1., 1.], [ 2., 2., 2., 2.]))
mc = np.array([ 1.4, 1.4, 1.4, 1.4])
vc = np.array([ 0.3, 0.3, 0.3, 0.3])
skc = [0.40824829046386357]*4
kurtc = [-1.833333333333333]*4
n, mm, m, v, sk, kurt = stats.describe(x)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13) #not sure about precision
assert_array_almost_equal(kurt, kurtc, decimal=13)
n, mm, m, v, sk, kurt = stats.describe(x.T, axis=1)
assert_equal(n, nc)
assert_equal(mm, mmc)
assert_equal(m, mc)
assert_equal(v, vc)
assert_array_almost_equal(sk, skc, decimal=13) #not sure about precision
assert_array_almost_equal(kurt, kurtc, decimal=13)
def test_normalitytests():
# numbers verified with R: dagoTest in package fBasics
st_normal, st_skew, st_kurt = (3.92371918, 1.98078826, -0.01403734)
pv_normal, pv_skew, pv_kurt = (0.14059673, 0.04761502, 0.98880019)
x = np.array((-2,-1,0,1,2,3)*4)**2
yield assert_array_almost_equal, stats.normaltest(x), (st_normal, pv_normal)
yield assert_array_almost_equal, stats.skewtest(x), (st_skew, pv_skew)
yield assert_array_almost_equal, stats.kurtosistest(x), (st_kurt, pv_kurt)
def mannwhitneyu():
x = np.array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1.])
y = np.array([ 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1.,
1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 1., 1., 2., 1., 1., 2.,
1., 2., 1., 1., 1., 1., 2., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1.])
#p-value verified with matlab and R to 5 significant digits
assert_array_almost_equal(stats.stats.mannwhitneyu(x,y),
(16980.5, 2.8214327656317373e-005), decimal=12)
def test_pointbiserial():
# copied from mstats tests removing nans
x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
0,0,0,0,1]
y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
0.8,0.7,0.6,0.5,0.2,0.2,0.1]
assert_almost_equal(stats.pointbiserialr(x, y)[0], 0.36149, 5)
def test_obrientransform():
#this is a regression test to check np.var replacement
    #I didn't separately verify the numbers
x1 = np.arange(5)
result = np.array(
[[ 5.41666667, 1.04166667, -0.41666667, 1.04166667, 5.41666667],
[ 21.66666667, 4.16666667, -1.66666667, 4.16666667, 21.66666667]])
assert_array_almost_equal(stats.obrientransform(x1, 2*x1), result, decimal=8)
class HarMeanTestCase:
def test_1dlist(self):
''' Test a 1d list'''
a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 34.1417152147
self.do(a, b)
def test_1darray(self):
''' Test a 1d array'''
a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dma(self):
''' Test a 1d masked array'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 34.1417152147
self.do(a, b)
def test_1dmavalue(self):
''' Test a 1d masked array with a masked value'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
mask=[0,0,0,0,0,0,0,0,0,1])
b = 31.8137186141
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
''' Test a 2d list'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(a, b)
def test_2darray(self):
''' Test a 2d array'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.array(a), b)
def test_2dma(self):
''' Test a 2d masked array'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 38.6696271841
self.do(np.ma.array(a), b)
def test_2daxis0(self):
''' Test a 2d list with axis=0'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([ 22.88135593, 39.13043478, 52.90076336, 65.45454545])
self.do(a, b, axis=0)
def test_2daxis1(self):
''' Test a 2d list with axis=1'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([ 19.2 , 63.03939962, 103.80078637])
self.do(a, b, axis=1)
def test_2dmatrixdaxis0(self):
''' Test a 2d list with axis=0'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[ 22.88135593, 39.13043478, 52.90076336, 65.45454545]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
''' Test a 2d list with axis=1'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[ 19.2 , 63.03939962, 103.80078637]]).T
self.do(np.matrix(a), b, axis=1)
## def test_dtype(self):
## ''' Test a 1d list with a new dtype'''
## a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
## b = 34.1417152147
## self.do(a, b, dtype=np.float128) # does not work on Win32
class TestHarMean(HarMeanTestCase, TestCase):
def do(self, a, b, axis=None, dtype=None):
x = stats.hmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
class GeoMeanTestCase:
def test_1dlist(self):
''' Test a 1d list'''
a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
b = 45.2872868812
self.do(a, b)
def test_1darray(self):
''' Test a 1d array'''
a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dma(self):
''' Test a 1d masked array'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
b = 45.2872868812
self.do(a, b)
def test_1dmavalue(self):
''' Test a 1d masked array with a masked value'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0,0,0,0,0,0,0,0,0,1])
b = 41.4716627439
self.do(a, b)
# Note the next tests use axis=None as default, not axis=0
def test_2dlist(self):
''' Test a 2d list'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(a, b)
def test_2darray(self):
''' Test a 2d array'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.array(a), b)
def test_2dma(self):
''' Test a 2d masked array'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = 52.8885199
self.do(np.ma.array(a), b)
def test_2daxis0(self):
''' Test a 2d list with axis=0'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([35.56893304, 49.32424149, 61.3579244 , 72.68482371])
self.do(a, b, axis=0)
def test_2daxis1(self):
''' Test a 2d list with axis=1'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.array([ 22.13363839, 64.02171746, 104.40086817])
self.do(a, b, axis=1)
def test_2dmatrixdaxis0(self):
''' Test a 2d list with axis=0'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[35.56893304, 49.32424149, 61.3579244 , 72.68482371]])
self.do(np.matrix(a), b, axis=0)
def test_2dmatrixaxis1(self):
''' Test a 2d list with axis=1'''
a=[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
b = np.matrix([[ 22.13363839, 64.02171746, 104.40086817]]).T
self.do(np.matrix(a), b, axis=1)
## def test_dtype(self):
## ''' Test a 1d list with a new dtype'''
## a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
## b = 45.2872868812
## self.do(a, b, dtype=np.float128) # does not exist on win32
def test_1dlist0(self):
''' Test a 1d list with zero element'''
a=[10, 20, 30, 40, 50, 60, 70, 80, 90, 0]
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1darray0(self):
''' Test a 1d array with zero element'''
a=np.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 0.0 # due to exp(-inf)=0
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dma0(self):
''' Test a 1d masked array with zero element'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
def test_1dmainf(self):
''' Test a 1d masked array with negative element'''
a=np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
b = 41.4716627439
olderr = np.seterr(all='ignore')
try:
self.do(a, b)
finally:
np.seterr(**olderr)
class TestGeoMean(GeoMeanTestCase, TestCase):
def do(self, a, b, axis=None, dtype=None):
#Note this doesn't test when axis is not specified
x = stats.gmean(a, axis=axis, dtype=dtype)
assert_almost_equal(b, x)
assert_equal(x.dtype, dtype)
def test_binomtest():
# precision tests compared to R for ticket:986
pp = np.concatenate(( np.linspace(0.1,0.2,5), np.linspace(0.45,0.65,5),
np.linspace(0.85,0.95,5)))
n = 501
x = 450
results = [0.0, 0.0, 1.0159969301994141e-304,
2.9752418572150531e-275, 7.7668382922535275e-250,
2.3381250925167094e-099, 7.8284591587323951e-081,
9.9155947819961383e-065, 2.8729390725176308e-050,
1.7175066298388421e-037, 0.0021070691951093692,
0.12044570587262322, 0.88154763174802508, 0.027120993063129286,
2.6102587134694721e-006]
for p, res in zip(pp,results):
assert_approx_equal(stats.binom_test(x, n, p), res,
                            significant=12, err_msg='fail for p=%f' % p)
    assert_approx_equal(stats.binom_test(50, 100, 0.1), 5.8320387857343647e-024,
                            significant=12, err_msg='fail for p=0.1')
class Test_Trim(object):
# test trim functions
def test_trim1(self):
a = np.arange(11)
assert_equal(stats.trim1(a, 0.1), np.arange(10))
assert_equal(stats.trim1(a, 0.2), np.arange(9))
assert_equal(stats.trim1(a, 0.2, tail='left'), np.arange(2,11))
assert_equal(stats.trim1(a, 3/11., tail='left'), np.arange(3,11))
def test_trimboth(self):
a = np.arange(11)
assert_equal(stats.trimboth(a, 3/11.), np.arange(3,8))
assert_equal(stats.trimboth(a, 0.2), np.array([2, 3, 4, 5, 6, 7, 8]))
assert_equal(stats.trimboth(np.arange(24).reshape(6,4), 0.2),
np.arange(4,20).reshape(4,4))
assert_equal(stats.trimboth(np.arange(24).reshape(4,6).T, 2/6.),
np.array([[ 2, 8, 14, 20],[ 3, 9, 15, 21]]))
assert_raises(ValueError, stats.trimboth,
np.arange(24).reshape(4,6).T, 4/6.)
def test_trim_mean(self):
assert_equal(stats.trim_mean(np.arange(24).reshape(4,6).T, 2/6.),
np.array([ 2.5, 8.5, 14.5, 20.5]))
assert_equal(stats.trim_mean(np.arange(24).reshape(4,6), 2/6.),
np.array([ 9., 10., 11., 12., 13., 14.]))
assert_equal(stats.trim_mean(np.arange(24), 2/6.), 11.5)
assert_equal(stats.trim_mean([5,4,3,1,2,0], 2/6.), 2.5)
class TestSigmaClip(object):
def test_sigmaclip1(self):
a = np.concatenate((np.linspace(9.5,10.5,31),np.linspace(0,20,5)))
fact = 4 #default
c, low, upp = stats.sigmaclip(a)
assert_(c.min()>low)
assert_(c.max()<upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, a.size)
def test_sigmaclip2(self):
a = np.concatenate((np.linspace(9.5,10.5,31),np.linspace(0,20,5)))
fact = 1.5
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min()>low)
assert_(c.max()<upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c.size, 4)
assert_equal(a.size, 36) #check original array unchanged
def test_sigmaclip3(self):
a = np.concatenate((np.linspace(9.5,10.5,11),np.linspace(-100,-50,3)))
fact = 1.8
c, low, upp = stats.sigmaclip(a, fact, fact)
assert_(c.min()>low)
assert_(c.max()<upp)
assert_equal(low, c.mean() - fact*c.std())
assert_equal(upp, c.mean() + fact*c.std())
assert_equal(c, np.linspace(9.5,10.5,11))
class TestFOneWay(TestCase):
def test_trivial(self):
"""A trivial test of stats.f_oneway, with F=0."""
F, p = stats.f_oneway([0,2], [0,2])
assert_equal(F, 0.0)
def test_basic(self):
"""A test of stats.f_oneway, with F=2."""
F, p = stats.f_oneway([0,2], [2,4])
# Despite being a floating point calculation, this data should
# result in F being exactly 2.0.
assert_equal(F, 2.0)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
Akshay0724/scikit-learn | examples/cluster/plot_digits_linkage.py | 366 | 2959 | """
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the "rich getting richer" behavior of
agglomerative clustering, which tends to create uneven cluster sizes.
This behavior is especially pronounced for the average linkage strategy,
which ends up with a couple of singleton clusters.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
np.random.seed(0)
def nudge_images(X, y):
# Having a larger dataset shows more clearly the behavior of the
# methods, but we multiply the size of the dataset only by 2, as the
    # cost of the hierarchical clustering methods is strongly
# super-linear in n_samples
shift = lambda x: ndimage.shift(x.reshape((8, 8)),
.3 * np.random.normal(size=2),
mode='constant',
).ravel()
X = np.concatenate([X, np.apply_along_axis(shift, 1, X)])
Y = np.concatenate([y, y], axis=0)
return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, X, labels, title=None):
x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0)
X_red = (X_red - x_min) / (x_max - x_min)
plt.figure(figsize=(6, 4))
for i in range(X_red.shape[0]):
plt.text(X_red[i, 0], X_red[i, 1], str(y[i]),
color=plt.cm.spectral(labels[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([])
plt.yticks([])
if title is not None:
plt.title(title, size=17)
plt.axis('off')
plt.tight_layout()
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
for linkage in ('ward', 'average', 'complete'):
clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
t0 = time()
clustering.fit(X_red)
print("%s : %.2fs" % (linkage, time() - t0))
plot_clustering(X_red, X, clustering.labels_, "%s linkage" % linkage)
plt.show()
| bsd-3-clause |
raamana/pyradigm | pyradigm/tests/test_classify_dataset.py | 1 | 12763 | import os
import sys
from os.path import dirname, join as pjoin, realpath
import numpy as np
sys.dont_write_bytecode = True
from pytest import raises, warns
from pyradigm.classify import ClassificationDataset as ClfDataset
out_dir = '.'
num_targets = np.random.randint(2, 50)
target_sizes = np.random.randint(10, 100, num_targets)
num_features = np.random.randint(10, 100)
num_samples = sum(target_sizes)
target_set = np.array(['C{:05d}'.format(x) for x in range(num_targets)])
feat_names = np.array([str(x) for x in range(num_features)])
test_dataset = ClfDataset()
for target_index, target_id in enumerate(target_set):
for sub_ix in range(target_sizes[target_index]):
subj_id = '{}_S{:05d}'.format(target_set[target_index], sub_ix)
feat = np.random.random(num_features)
test_dataset.add_samplet(subj_id, feat, target_id,
feature_names=feat_names)
out_file = os.path.join(out_dir, 'random_example_dataset.pkl')
test_dataset.save(out_file)
# same IDs, new features
same_ids_new_feat = ClfDataset()
for sub_id in test_dataset.samplet_ids:
feat = np.random.random(num_features)
same_ids_new_feat.add_samplet(sub_id, feat,
test_dataset.targets[sub_id])
same_ids_new_feat.feature_names = np.array(['new_f{}'.format(x) for x in range(
num_features)])
test_dataset.description = 'test dataset'
print(test_dataset)
print('default format:\n {}'.format(test_dataset))
print('full repr :\n {:full}'.format(test_dataset))
print('string/short :\n {:s}'.format(test_dataset))
target_set, target_sizes = test_dataset.summarize()
reloaded_dataset = ClfDataset(dataset_path=out_file,
description='reloaded test_dataset')
copy_dataset = ClfDataset(in_dataset=test_dataset)
rand_index = np.random.randint(0, len(target_set), 1)[0]
random_target_name = target_set[rand_index]
random_target_ds = test_dataset.get_class(random_target_name)
other_targets_ds = test_dataset - random_target_ds
other_target_set = set(target_set) - set([random_target_name])
other_targets_get_with_list = test_dataset.get_class(other_target_set)
recombined = other_targets_ds + random_target_ds
empty_dataset = ClfDataset()
test2 = ClfDataset()
test3 = ClfDataset()
def test_empty():
assert not empty_dataset
def test_target_type():
rand_id = test_dataset.samplet_ids[np.random.randint(2, num_samples)]
if not isinstance(test_dataset.targets[rand_id],
test_dataset._target_type):
raise TypeError('invalid target type for samplet id {}'.format(rand_id))
def test_num_targets():
assert test_dataset.num_targets == num_targets
def test_num_features():
assert test_dataset.num_features == num_features
def test_shape():
assert test_dataset.shape == (num_samples, num_features)
def test_num_features_setter():
with raises(AttributeError):
test_dataset.num_features = 0
def test_num_samples():
assert test_dataset.num_samplets == sum(target_sizes)
def test_subtract():
assert other_targets_ds.num_samplets == sum(target_sizes) - target_sizes[rand_index]
def test_get_target_list():
assert other_targets_ds == other_targets_get_with_list
def test_add():
a = other_targets_ds + random_target_ds
n = a.num_samplets
n1 = other_targets_ds.num_samplets
n2 = random_target_ds.num_samplets
assert n1 + n2 == n
assert set(a.samplet_ids) == set(
other_targets_ds.samplet_ids + random_target_ds.samplet_ids)
assert a.num_features == other_targets_ds.num_features == \
random_target_ds.num_features
assert all(a.feature_names == other_targets_ds.feature_names)
comb_ds = test_dataset + same_ids_new_feat
comb_names = np.concatenate([test_dataset.feature_names,
same_ids_new_feat.feature_names])
if not all(comb_ds.feature_names == comb_names):
raise ValueError('feature names were not carried forward in combining two '
'datasets with same IDs and different feature names!')
def test_set_existing_sample():
sid = test_dataset.samplet_ids[0]
new_feat = np.random.random(num_features)
with raises(KeyError):
test_dataset[sid + 'nonexisting'] = new_feat
with raises(ValueError):
test_dataset[sid] = new_feat[:-2] # diff dimensionality
test_dataset[sid] = new_feat
if not np.all(test_dataset[sid] == new_feat):
raise ValueError('Bug in replacing features for an existing sample!'
'Retrieved features do not match previously set features.')
def test_data_type():
for in_dtype in [np.float_, np.int, np.bool_]:
cds = ClfDataset(dtype=in_dtype)
cds.add_samplet('a', [1, 2.0, -434], 'class')
if cds.dtype != in_dtype or cds['a'].dtype != in_dtype:
raise TypeError('Dataset not maintaining the features in the requested'
'dtype {}. They are in {}'.format(in_dtype, cds.dtype))
def test_cant_read_nonexisting_file():
with raises(IOError):
a = ClfDataset('/nonexistentrandomdir/disofddlsfj/arbitrary.noname.pkl')
def test_cant_write_to_nonexisting_dir():
with raises(IOError):
test_dataset.save('/nonexistentrandomdir/jdknvoindvi93/arbitrary.noname.pkl')
def test_invalid_constructor():
with raises(TypeError):
a = ClfDataset(
in_dataset='/nonexistentrandomdir/disofddlsfj/arbitrary.noname.pkl')
with raises(ValueError):
# data simply should not be a dict
b = ClfDataset(dataset_path=None, in_dataset=None, data=list())
with raises(ValueError):
c = ClfDataset(dataset_path=None,
in_dataset=None,
data=None,
targets='invalid_value')
def test_return_data_labels():
matrix, vec_labels, sub_ids = test_dataset.data_and_labels()
assert len(vec_labels) == len(sub_ids)
assert len(vec_labels) == matrix.shape[0]
def test_init_with_dict():
new_ds = ClfDataset(data=test_dataset.data,
targets=test_dataset.targets)
assert new_ds == test_dataset
# def test_labels_setter():
# fewer_labels = test_dataset.labels
# label_keys = list(fewer_labels.samplet_ids())
# fewer_labels.pop(label_keys[0])
#
# with raises(ValueError):
# test_dataset.labels = fewer_labels
#
# same_len_diff_key = fewer_labels
# same_len_diff_key[u'sldiursvdkvjs'] = 1
# with raises(ValueError):
# test_dataset.labels = same_len_diff_key
#
# # must be dict
# with raises(ValueError):
# test_dataset.labels = None
def test_targets_setter():
fewer_targets = test_dataset.targets
targets_keys = list(fewer_targets.keys())
fewer_targets.pop(targets_keys[0])
with raises(ValueError):
test_dataset.targets = fewer_targets
same_len_diff_key = fewer_targets
same_len_diff_key['sldiursvdkvjs'] = 'lfjd'
with raises(ValueError):
test_dataset.targets = same_len_diff_key
def test_feat_names_setter():
# fewer
with raises(ValueError):
test_dataset.feature_names = feat_names[0:test_dataset.num_features - 2]
# too many
with raises(ValueError):
test_dataset.feature_names = np.append(feat_names, 'blahblah')
def test_add_existing_id():
sid = test_dataset.samplet_ids[0]
with raises(ValueError):
test_dataset.add_samplet(sid, None, None)
def test_add_new_id_diff_dim():
new_id = 'dsfdkfslj38748937439kdshfkjhf38'
sid = test_dataset.samplet_ids[0]
data_diff_dim = np.random.rand(test_dataset.num_features + 1, 1)
with raises(ValueError):
test_dataset.add_samplet(new_id, data_diff_dim, None, None)
def test_del_nonexisting_id():
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with warns(UserWarning):
test_dataset.del_samplet(nonexisting_id)
def test_get_nonexisting_class():
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with raises(ValueError):
test_dataset.get_class(nonexisting_id)
def test_rand_feat_subset():
nf = copy_dataset.num_features
subset_len = np.random.randint(1, nf)
subset = np.random.randint(1, nf, size=subset_len)
subds = copy_dataset.get_feature_subset(subset)
assert subds.num_features == subset_len
def test_eq_self():
assert test_dataset == test_dataset
def test_eq_copy():
new_copy = ClfDataset(in_dataset=copy_dataset)
assert new_copy == copy_dataset
def test_unpickling():
out_file = os.path.join(out_dir, 'random_pickled_dataset.pkl')
copy_dataset.save(out_file)
reloaded_dataset = ClfDataset(dataset_path=out_file,
description='reloaded test_dataset')
assert copy_dataset == reloaded_dataset
def test_subset_class():
assert random_target_ds.num_samplets == target_sizes[rand_index]
def test_get_subset():
assert random_target_ds == reloaded_dataset.get_class(random_target_name)
nonexisting_id = u'dsfdkfslj38748937439kdshfkjhf38'
with warns(UserWarning):
test_dataset.get_subset(nonexisting_id)
def test_membership():
rand_idx = np.random.randint(0, test_dataset.num_samplets)
member = test_dataset.samplet_ids[rand_idx]
not_member = u'sdfdkshfdsk34823058wdkfhd83hifnalwe8fh8t'
assert member in test_dataset
assert not_member not in test_dataset
def rand_ints_range(n, k):
return np.random.randint(1, n, size=min(n, k))
def test_glance():
for k in np.random.randint(1, test_dataset.num_samplets - 1, 10):
glanced_subset = test_dataset.glance(k)
assert len(glanced_subset) == k
def test_random_subset():
for perc in np.arange(0.1, 1, 0.2):
subset = copy_dataset.random_subset(perc_in_class=perc)
# separating the calculation by class to mimic the implementation in the
# class
expected_size = sum([np.int64(np.floor(n_in_class * perc)) for n_in_class in
target_sizes])
assert subset.num_samplets == expected_size
def test_random_subset_by_count():
smallest_size = min(target_sizes)
for count in np.random.randint(1, smallest_size, 7):
subset = copy_dataset.random_subset_ids_by_count(count_per_class=count)
assert len(subset) == num_targets * count
def test_train_test_split_ids_count():
smallest_size = min(target_sizes)
for count in np.random.randint(1, smallest_size, 7):
subset_train, subset_test = copy_dataset.train_test_split_ids(
count_per_class=count)
assert len(subset_train) == num_targets * count
assert len(subset_test) == copy_dataset.num_samplets - num_targets * count
assert len(set(subset_train).intersection(subset_test)) == 0
with raises(ValueError):
copy_dataset.train_test_split_ids(count_per_class=-1)
with raises(ValueError):
copy_dataset.train_test_split_ids(
count_per_class=copy_dataset.num_samplets + 1.0)
with raises(ValueError):
# both cant be specified at the same time
copy_dataset.train_test_split_ids(count_per_class=2, train_perc=0.5)
def test_train_test_split_ids_perc():
for perc in np.arange(0.25, 1.0, 0.1):
subset_train, subset_test = copy_dataset.train_test_split_ids(
train_perc=perc)
expected_train_size = sum(np.floor(target_sizes * perc))
assert len(subset_train) == expected_train_size
assert len(subset_test) == copy_dataset.num_samplets - expected_train_size
assert len(set(subset_train).intersection(subset_test)) == 0
with raises(ValueError):
subset_train, subset_test = copy_dataset.train_test_split_ids(
train_perc=0.00001)
with raises(ValueError):
copy_dataset.train_test_split_ids(train_perc=1.1)
with raises(ValueError):
copy_dataset.train_test_split_ids(train_perc=-1)
# ------------------------------------------------
# different file formats
# ------------------------------------------------
def test_load_arff():
arff_path = realpath(pjoin(dirname(__file__),
'..', '..', 'example_datasets', 'iris.arff'))
mld = ClfDataset.from_arff(arff_path=arff_path)
if mld.num_samplets != 150:
raise ValueError('number of samples mismatch')
if mld.num_features != 4:
raise ValueError('number of features mismatch')
if mld.num_targets != 3:
raise ValueError('number of classes mismatch')
if len(mld.feature_names) != 4:
raise ValueError('length of feature names do not match number of features')
# print(mld)
test_data_type() | mit |
woobe/h2o | py/testdir_single_jvm/test_frame_nfold_extract.py | 1 | 1988 | import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_jobs, h2o_exec as h2e
DO_POLL = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1,java_heap_GB=4, base_port=54323)
else:
h2o_hosts.build_cloud_with_hosts(base_port=54323)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_frame_nfold_extract(self):
h2o.beta_features = True
csvFilename = 'covtype.data'
csvPathname = 'standard/' + csvFilename
hex_key = "covtype.hex"
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, schema='local', timeoutSecs=10)
print "Just frame_nfold_extract away and see if anything blows up"
splitMe = hex_key
inspect = h2o_cmd.runInspect(key=splitMe)
origNumRows = inspect['numRows']
origNumCols = inspect['numCols']
for s in range(20):
inspect = h2o_cmd.runInspect(key=splitMe)
numRows = inspect['numRows']
numCols = inspect['numCols']
# FIX! should check if afold is outside of nfold range allowance
fs = h2o.nodes[0].frame_nfold_extract(source=splitMe, nfolds=2, afold=random.randint(0,1))
print "fs", h2o.dump_json(fs)
split0_key = fs['split_keys'][0]
split1_key = fs['split_keys'][1]
split0_rows = fs['split_rows'][0]
split1_rows = fs['split_rows'][1]
print "Iteration", s, "split0_rows:", split0_rows, "split1_rows:", split1_rows
splitMe = split0_key
if split0_rows<=2:
break
print "Iteration", s
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
Fireblend/scikit-learn | sklearn/datasets/covtype.py | 177 | 3821 | """Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
http://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck <[email protected]>
# Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import sys
from gzip import GzipFile
from io import BytesIO
import logging
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from ..utils.fixes import makedirs
from ..externals import joblib
from ..utils import check_random_state
URL = ('http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
logger = logging.getLogger()
def fetch_covtype(data_home=None, download_if_missing=True,
random_state=None, shuffle=False):
"""Load the covertype dataset, downloading it if necessary.
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : boolean, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
dataset.target : numpy array of shape (581012,)
Each value corresponds to one of the 7 forest covertypes with values
ranging from 1 to 7.
dataset.DESCR : string
Description of the forest covertype dataset.
"""
data_home = get_data_home(data_home=data_home)
if sys.version_info[0] == 3:
# The zlib compression format used by joblib is not compatible when
# switching from Python 2 to Python 3, let us use a separate folder
# under Python 3:
dir_suffix = "-py3"
else:
# Backward compat for Python 2 users
dir_suffix = ""
covtype_dir = join(data_home, "covertype" + dir_suffix)
samples_path = join(covtype_dir, "samples")
targets_path = join(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
makedirs(covtype_dir, exist_ok=True)
logger.warning("Downloading %s" % URL)
f = BytesIO(urlopen(URL).read())
Xy = np.genfromtxt(GzipFile(fileobj=f), delimiter=',')
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
return Bunch(data=X, target=y, DESCR=__doc__)
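# Hypothetical usage sketch (illustrative only; assumes scikit-learn is
# installed and the UCI download succeeds on the first call, after which the
# arrays are read back from the joblib cache):
#
#     from sklearn.datasets import fetch_covtype
#     covtype = fetch_covtype(shuffle=True, random_state=0)
#     print(covtype.data.shape)    # (581012, 54)
#     print(covtype.target.shape)  # (581012,)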
| bsd-3-clause |
thientu/scikit-learn | examples/linear_model/plot_ridge_path.py | 253 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
ivankreso/stereo-vision | scripts/stereo_model_sba/generate_features.py | 1 | 10443 | #!/bin/python
import os, shutil
import numpy as np
import matplotlib.pyplot as plt
import np_helper as nph
# plot 3d points
def plot_pts3d(pts3d, visible_status):
fig_xz = plt.figure()
plt.xlabel('X (m)', fontsize=14)
plt.ylabel('Z (m)', fontsize=14)
plt.axis('equal')
plt.plot(pts3d[0,:], pts3d[2,:], "ro")
for i in range(visible_status.shape[0]):
if visible_status[i] == 0:
plt.plot(pts3d[0,i], pts3d[2,i], "ko")
fig_xy = plt.figure()
plt.xlabel('X (m)', fontsize=14)
plt.ylabel('Y (m)', fontsize=14)
plt.axis('equal')
plt.gca().invert_yaxis()
plt.plot(pts3d[0,:], pts3d[1,:], 'bo')
for i in range(visible_status.shape[0]):
if visible_status[i] == 0:
plt.plot(pts3d[0,i], pts3d[1,i], "ko")
fig_xz.savefig("plot_pts3d_xz.pdf", bbox_inches='tight')
fig_xy.savefig("plot_pts3d_xy.pdf", bbox_inches='tight')
# non blocking
#plt.ion()
# or
#plt.draw()
#plt.show()
# plot optical flow
def plot_flow(pts2d_left, pts2d_right, imgsz):
fig_flow = plt.figure()
#plt.xlim(0, width)
#plt.ylim(0, height)
plt.axis([0, imgsz[0], 0, imgsz[1]], 'equal')
plt.gca().invert_yaxis()
plt.xlabel('u (pixels)', fontsize=14)
plt.ylabel('v (pixels)', fontsize=14)
for i in range(pts2d_left.shape[1]):
# if not visible in cam - skip it
if pts2d_left[0,i] < -0.5 or pts2d_right[0,i] < -0.5:
continue
match_x = np.array([pts2d_left[0,i], pts2d_right[0,i]])
match_y = np.array([pts2d_left[1,i], pts2d_right[1,i]])
plt.plot(match_x, match_y, 'k.-')
plt.plot(pts2d_left[0,i], pts2d_left[1,i], 'r.')
plt.plot(pts2d_right[0,i], pts2d_right[1,i], 'b.')
fig_flow.savefig("plot_artif_disp.pdf", bbox_inches='tight')
# project 3d points onto image plane
def project_points(imgsz, C, Rt, pts3d):
pts2d = C.dot(Rt.dot(pts3d))
for i in range(pts2d.shape[1]):
if pts2d[2,i] > 1.0:
pts2d[:,i] = pts2d[:,i] / pts2d[2,i]
else:
pts2d[:,i] = -1.0
#print(pts2d[:,i])
return pts2d[0:2,:]
# convert projected 2d points into pixels - simulates camera sensor
def pixelize(imgsz, pts2d):
for i in range(pts2d.shape[1]):
# if in sensor range - pixelize it
if pts2d[0,i] >= -0.5 and pts2d[0,i] <= (imgsz[0]-0.5) and pts2d[1,i] >= -0.5 and pts2d[1,i] <= (imgsz[1]-0.5):
#continue
pts2d[:,i] = np.round(pts2d[:,i]) # SBA slightly better with SWS = 5
# add gaussian noise
#noise = np.random.normal(0.0, 0.2)
#noise = np.random.normal(0.0, 0.3) # SBA still better
#noise = np.random.normal(0.0, 0.4) # worse
#pts2d[:,i] = pts2d[:,i] + noise
# else remove that point
else:
pts2d[:,i] = -1.0
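# Illustrative sanity check with assumed toy values (C is taken here to be a
# 3x4 projection matrix, matching how the calibration file is used below):
#
#     C_toy = np.array([[700., 0., 600., 0.],
#                       [0., 700., 180., 0.],
#                       [0., 0., 1., 0.]])
#     imgsz_toy = np.array([1241, 376])
#     pt_h = np.array([[1.0], [0.5], [10.0], [1.0]])   # homogeneous 3D point
#     uv = project_points(imgsz_toy, C_toy, np.eye(4), pt_h)  # -> [[670.], [215.]]
#     pixelize(imgsz_toy, uv)   # rounds in place; off-sensor points become -1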
def getVisibleStatus(projs):
status = np.ones(projs.shape[2], dtype=np.int8)
for i in range(status.shape[0]):
if projs[0,0,i] < -0.5 or projs[1,0,i] < -0.5 or projs[2,0,i] < -0.5 or projs[3,0,i] < -0.5:
status[i] = 0
return status
def triangulate(C, b, proj_left, projs_right):
f = C[0,0]
cx = C[0,2]
cy = C[1,2]
x = proj_left[0]
y = proj_left[1]
pt3d = np.zeros((3))
disp = x - projs_right[0]
disp = max(disp, 0.001)
pt3d[0] = (x - cx) * b / disp;
pt3d[1] = (y - cy) * b / disp;
pt3d[2] = f * b / disp;
return pt3d
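# Consistency check with the same assumed toy camera (f = 700, cx = 600,
# cy = 180, baseline b = 0.5): a 35 px disparity at pixel (670, 215)
# triangulates back to roughly (1.0, 0.5, 10.0).
#
#     C_toy = np.array([[700., 0., 600., 0.],
#                       [0., 700., 180., 0.],
#                       [0., 0., 1., 0.]])
#     triangulate(C_toy, 0.5, np.array([670., 215.]), np.array([635., 215.]))
#     # -> array([ 1. ,  0.5, 10. ])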
def update_age(age, projs_left, projs_right, frame):
if frame == 0:
for i in range(age.shape[1]):
age[frame,i] = 0
else:
for i in range(age.shape[1]):
if projs_left[0,i] < -0.5 or projs_left[1,i] < -0.5 or projs_right[0,i] < -0.5 or projs_right[1,i] < -0.5:
age[frame,i] = -1
else:
age[frame,i] = age[frame-1,i] + 1
def write_tracker_data(folder, projs_left, projs_right, age):
num_frames = projs_left.shape[0]
# write 3d points and 2d projs in files
for i in range(num_frames):
write_frame_projs(i, folder + "/%06d" % (i) + ".txt", projs_left[i,:,:], projs_right[i,:,:], age[i,:])
def write_frame_projs(i, filename, projs_left, projs_right, age):
fp = open(filename, "w")
for i in range(projs_left.shape[1]):
# every point is written, including those with invisible (-1) projections
fp.write(str(i) + " " + str(age[i]))
# write left and right features for every frame
fp.write(" " + str(projs_left[0,i]) + " " + str(projs_left[1,i]) + " "
+ str(projs_right[0,i]) + " " + str(projs_right[1,i]))
fp.write("\n")
fp.close()
def write_points_sba(filename, C, baseline, extr_params, projs_left, projs_right, pts3d_gt):
num_world_pts = projs_left.shape[2]
fp = open(filename, "w")
fp.write(str(C[0,0]) + " " + str(C[1,1]) + " " + str(C[0,2]) + " " + str(C[1,2]) + " " + str(baseline) + "\n")
pts3d_lst = []
observ_left_lst = []
observ_right_lst = []
#projs_left = np.zeros((nframes, 2, pts3d_num))
#points3d = np.array(3, num_world_pts)
assert projs_left.shape[0] == extr_params.shape[0]
num_frames = projs_left.shape[0]
num_points = 0
num_observations = 0
for i in range(num_world_pts):
# if visible in first frame add that point and all its observations
if projs_left[0,0,i] >= 0.0 and projs_right[0,0,i] >= 0.0:
num_points += 1
#points3d[:,i] = triangulate(C, baseline, projs_left[0,:,i], projs_right[0,:,i])
pts3d_lst.append(triangulate(C, baseline, projs_left[0,:,i], projs_right[0,:,i]))
print(pts3d_lst[-1].T, " --> ", pts3d_gt[:,i])
observ_left = np.ndarray(shape=(2,0))
observ_right = np.ndarray(shape=(2,0))
for f in range(num_frames):
# add until we find unvisible projection
if projs_left[f,0,i] >= 0.0 and projs_right[f,0,i] >= 0.0:
#print(projs_left[f,:,i].reshape(2,1))
observ_left = np.hstack([observ_left, projs_left[f,:,i].reshape(2,1)])
observ_right = np.hstack([observ_right, projs_right[f,:,i].reshape(2,1)])
num_observations += 1
else:
break
observ_left_lst.append(observ_left)
observ_right_lst.append(observ_right)
#pts3d = np.array(pts3d_lst)
fp.write(str(num_frames) + " " + str(num_points) + " " + str(num_observations) + "\n")
for i in range(len(observ_left_lst)):
left = observ_left_lst[i]
right = observ_right_lst[i]
for f in range(left.shape[1]):
fp.write(str(f) + " " + str(i) + " " + str(left[0,f]) + " " + str(left[1,f]) + " "
+ str(right[0,f]) + " " + str(right[1,f]) + "\n")
for i in range(extr_params.shape[0]):
#R = Rt[i,:].reshape(4,4)[0:4,0:4]
#(rvec, jac) = cv2.Rodrigues(R)
rt_vec = extr_params[i,:]
for j in range(rt_vec.shape[0]):
fp.write(str(rt_vec[j]) + "\n")
for i in range(len(pts3d_lst)):
pts3d = pts3d_lst[i]
print(pts3d)
for j in range(3):
fp.write(str(pts3d[j]) + "\n")
# main
np.set_printoptions(precision=3, linewidth=180)
path_file = "path.txt"
path_estim = "path_noise.txt"
out_folder = "/home/kivan/Projects/datasets/stereo_sba/"
# bumblebee
#imgsz = np.array([640, 480])
#cam_mat = "C_bb.txt"
#baseline = 0.12
#out_folder_prefix = "/home/kivan/Projects/datasets/stereo_sba/"
# libviso 00 cam
imgsz = np.array([1241,376])
cam_mat = "C_libviso_00.txt"
baseline = 0.53716
#out_folder = "/home/kreso/projects/master_thesis/datasets/stereo_model/pointdata_viso00path_00cam/"
Rt_I = np.eye(4)
#C = np.eye(3)
#np.savetxt('C.txt', C, fmt='%.2f')
C = np.loadtxt(cam_mat)
print('C:\n', C, '\n')
extr_noise = np.loadtxt(path_estim)
Rt_mats = np.loadtxt(path_file)
Rt_mats = np.append(Rt_mats, np.zeros((Rt_mats.shape[0], 3)), 1) # add three zero columns
Rt_mats = np.append(Rt_mats, np.array([[1] * Rt_mats.shape[0]]).T, 1) # add one ones column
#print("Rt mats: \n", Rt_mats, "\n")
nframes = Rt_mats.shape[0]
# generate new 3D points in front of current camera position
pts3d = np.loadtxt("pts3d.txt")
#print(pts3d)
pts3d_num = pts3d.shape[1]
projs_left = np.zeros((nframes, 2, pts3d_num))
projs_right = np.zeros((nframes, 2, pts3d_num))
age = np.zeros((nframes, pts3d_num), dtype='int32')
# Tb transform puts right camera in center
Tb = np.eye(4)
Tb[0,3] = - baseline
print("Tb:\n", Tb, "\n")
for i in range(nframes):
## inputs are camera position matrices in each frame
## so they are inverse of points transform Rt matrix
#Rt_prev_inv = Rt_mats[i,:].reshape(4,4)
#Rt_curr_inv = Rt_mats[i+1,:].reshape(4,4)
## calculate point trasform Rt matrices in 2 frames (inverse of camera transform matrices)
## slower way
##Rt_prev = np.linalg.inv(Rt_prev_inv)
##Rt_curr = np.linalg.inv(Rt_curr_inv)
## faster (and better) way
#Rt_prev = nph.inv_Rt(Rt_prev_inv)
#Rt_curr = nph.inv_Rt(Rt_curr_inv)
#print(Rt_prev)
#print(nph.inv_Rt(Rt_prev_inv))
# project 3d point on image plane
print("Frame: " + str(i))
Rt = nph.inv_Rt(Rt_mats[i,:].reshape(4,4))
pts2d_left = project_points(imgsz, C, Rt, pts3d)
pts2d_right = project_points(imgsz, C, Tb.dot(Rt), pts3d)
# round them up in pixels
pixelize(imgsz, pts2d_left)
pixelize(imgsz, pts2d_right)
update_age(age, pts2d_left, pts2d_right, i)
projs_left[i,:,:] = pts2d_left
projs_right[i,:,:] = pts2d_right
#print("Frame " + str(i) + "\npoints visible: " + "%d / %d" % (visible_num, pts3d.shape[2]))
#print("Plotting 3d points")
#visible_pts = getVisibleStatus(projs_left)
#plot_pts3d(pts3d, visible_pts)
#plot_flow(pts2d_left, pts2d_right, imgsz)
#plt.show()
#exit(0)
# TODO: remove pts3d - write_points_sba("SBA_dataset.txt", C, baseline, extr_noise, projs_left, projs_right, pts3d)
write_tracker_data(out_folder, projs_left, projs_right, age)
#fig_triang = plt.figure()
#plt.axis([0, 185, 0, 35], 'equal')
#plt.plot(triang_errors[0,:], "r-", label="Bumblebee cam")
#plt.plot(triang_errors[1,:], "b-", label="KITTI cam")
#plt.xlabel('frame number', fontsize=30)
#plt.ylabel('triangulation error (m)', fontsize=30)
#plt.legend(fontsize=22)
#plt.show()
#fig_triang.savefig("plot_triang_error.pdf", bbox_inches='tight')
| bsd-3-clause |
marchick209/Convolutional-Neural-Networks | src/lenet.py | 1 | 48961 | #!/usr/bin/python
# General Packages
import os
import sys
import time
import pdb
from collections import OrderedDict
# Math Packages
import math
import numpy
import cv2
# Theano Packages
import theano
import theano.tensor as T
from theano.ifelse import ifelse
# CNN code packages
from cnn import ReLU
from cnn import Sigmoid
from cnn import Tanh
from cnn import SVMLayer
from cnn import LogisticRegression
from cnn import HiddenLayer
from cnn import _dropout_from_layer
from cnn import DropoutHiddenLayer
from cnn import MLP
from cnn import LeNetConvPoolLayer
from util import visualize
from util import visualize_color_filters
from util import save_network
# Datahandling packages
from loaders import load_data_pkl
from loaders import load_data_mat
from loaders import load_skdata_caltech101
from loaders import load_skdata_mnist
from loaders import load_skdata_cifar10
from loaders import load_skdata_mnist_noise1
from loaders import load_skdata_mnist_noise2
from loaders import load_skdata_mnist_noise3
from loaders import load_skdata_mnist_noise4
from loaders import load_skdata_mnist_noise5
from loaders import load_skdata_mnist_noise6
from loaders import load_skdata_mnist_bg_images
from loaders import load_skdata_mnist_bg_rand
from loaders import load_skdata_mnist_rotated
from loaders import load_skdata_mnist_rotated_bg
def run_cnn( arch_params,
optimization_params ,
data_params,
filename_params,
visual_params,
verbose = False,
):
#####################
# Unpack Variables #
#####################
results_file_name = filename_params [ "results_file_name" ] # Files that will be saved down on completion Can be used by the parse.m file
error_file_name = filename_params [ "error_file_name" ]
cost_file_name = filename_params [ "cost_file_name" ]
confusion_file_name = filename_params [ "confusion_file_name" ]
network_save_name = filename_params [ "network_save_name" ]
dataset = data_params [ "loc" ]
height = data_params [ "height" ]
width = data_params [ "width" ]
batch_size = data_params [ "batch_size" ]
load_batches = data_params [ "load_batches" ] * batch_size
batches2train = data_params [ "batches2train" ]
batches2test = data_params [ "batches2test" ]
batches2validate = data_params [ "batches2validate" ]
channels = data_params [ "channels" ]
mom_start = optimization_params [ "mom_start" ]
mom_end = optimization_params [ "mom_end" ]
mom_epoch_interval = optimization_params [ "mom_interval" ]
mom_type = optimization_params [ "mom_type" ]
initial_learning_rate = optimization_params [ "initial_learning_rate" ]
learning_rate_decay = optimization_params [ "learning_rate_decay" ]
ada_grad = optimization_params [ "ada_grad" ]
fudge_factor = optimization_params [ "fudge_factor" ]
l1_reg = optimization_params [ "l1_reg" ]
l2_reg = optimization_params [ "l2_reg" ]
rms_prop = optimization_params [ "rms_prop" ]
rms_rho = optimization_params [ "rms_rho" ]
rms_epsilon = optimization_params [ "rms_epsilon" ]
squared_filter_length_limit = arch_params [ "squared_filter_length_limit" ]
n_epochs = arch_params [ "n_epochs" ]
validate_after_epochs = arch_params [ "validate_after_epochs" ]
mlp_activations = arch_params [ "mlp_activations" ]
cnn_activations = arch_params [ "cnn_activations" ]
dropout = arch_params [ "dropout" ]
column_norm = arch_params [ "column_norm" ]
dropout_rates = arch_params [ "dropout_rates" ]
nkerns = arch_params [ "nkerns" ]
outs = arch_params [ "outs" ]
filter_size = arch_params [ "filter_size" ]
pooling_size = arch_params [ "pooling_size" ]
num_nodes = arch_params [ "num_nodes" ]
use_bias = arch_params [ "use_bias" ]
random_seed = arch_params [ "random_seed" ]
svm_flag = arch_params [ "svm_flag" ]
visualize_flag = visual_params ["visualize_flag" ]
visualize_after_epochs = visual_params ["visualize_after_epochs" ]
n_visual_images = visual_params ["n_visual_images" ]
display_flag = visual_params ["display_flag" ]
# Random seed initialization.
rng = numpy.random.RandomState(random_seed)
#################
# Data Loading #
#################
print "... loading data"
# load matlab files as dataset.
if data_params["type"] == 'mat':
train_data_x, train_data_y, train_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'train')
test_data_x, test_data_y, test_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'test') # Load dataset for first epoch.
valid_data_x, valid_data_y, valid_data_y1 = load_data_mat(dataset, batch = 1 , type_set = 'valid') # Load dataset for first epoch.
train_set_x = theano.shared(numpy.asarray(train_data_x, dtype=theano.config.floatX), borrow=True)
train_set_y = theano.shared(numpy.asarray(train_data_y, dtype='int32'), borrow=True)
train_set_y1 = theano.shared(numpy.asarray(train_data_y1, dtype=theano.config.floatX), borrow=True)
test_set_x = theano.shared(numpy.asarray(test_data_x, dtype=theano.config.floatX), borrow=True)
test_set_y = theano.shared(numpy.asarray(test_data_y, dtype='int32'), borrow=True)
test_set_y1 = theano.shared(numpy.asarray(test_data_y1, dtype=theano.config.floatX), borrow=True)
valid_set_x = theano.shared(numpy.asarray(valid_data_x, dtype=theano.config.floatX), borrow=True)
valid_set_y = theano.shared(numpy.asarray(valid_data_y, dtype='int32'), borrow=True)
valid_set_y1 = theano.shared(numpy.asarray(valid_data_y1, dtype=theano.config.floatX), borrow=True)
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
multi_load = True
# load pkl data as is shown in theano tutorials
elif data_params["type"] == 'pkl':
data = load_data_pkl(dataset)
train_set_x, train_set_y, train_set_y1 = data[0]
valid_set_x, valid_set_y, valid_set_y1 = data[1]
test_set_x, test_set_y, test_set_y1 = data[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
n_train_images = train_set_x.get_value(borrow=True).shape[0]
n_test_images = test_set_x.get_value(borrow=True).shape[0]
n_valid_images = valid_set_x.get_value(borrow=True).shape[0]
n_train_batches_all = n_train_images / batch_size
n_test_batches_all = n_test_images / batch_size
n_valid_batches_all = n_valid_images / batch_size
if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate): # You can't have so many batches.
print "... !! Dataset doens't have so many batches. "
raise AssertionError()
multi_load = False
# load skdata ( its a good library that has a lot of datasets)
elif data_params["type"] == 'skdata':
if (dataset == 'mnist' or
dataset == 'mnist_noise1' or
dataset == 'mnist_noise2' or
dataset == 'mnist_noise3' or
dataset == 'mnist_noise4' or
dataset == 'mnist_noise5' or
dataset == 'mnist_noise6' or
dataset == 'mnist_bg_images' or
dataset == 'mnist_bg_rand' or
dataset == 'mnist_rotated' or
dataset == 'mnist_rotated_bg') :
print "... importing " + dataset + " from skdata"
func = globals()['load_skdata_' + dataset]
data = func()
train_set_x, train_set_y, train_set_y1 = data[0]
valid_set_x, valid_set_y, valid_set_y1 = data[1]
test_set_x, test_set_y, test_set_y1 = data[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
n_train_images = train_set_x.get_value(borrow=True).shape[0]
n_test_images = test_set_x.get_value(borrow=True).shape[0]
n_valid_images = valid_set_x.get_value(borrow=True).shape[0]
n_train_batches_all = n_train_images / batch_size
n_test_batches_all = n_test_images / batch_size
n_valid_batches_all = n_valid_images / batch_size
if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate): # You can't have so many batches.
print "... !! Dataset doens't have so many batches. "
raise AssertionError()
multi_load = False
elif dataset == 'cifar10':
print "... importing cifar 10 from skdata"
data = load_skdata_cifar10()
train_set_x, train_set_y, train_set_y1 = data[0]
valid_set_x, valid_set_y, valid_set_y1 = data[1]
test_set_x, test_set_y, test_set_y1 = data[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
multi_load = False
elif dataset == 'caltech101':
print "... importing caltech 101 from skdata"
# shuffle the data
total_images_in_dataset = 9144
rand_perm = numpy.random.permutation(total_images_in_dataset) # create a constant shuffle, so that data can be loaded in batchmode with the same random shuffle
n_train_images = total_images_in_dataset / 3
n_test_images = total_images_in_dataset / 3
n_valid_images = total_images_in_dataset / 3
n_train_batches_all = n_train_images / batch_size
n_test_batches_all = n_test_images / batch_size
n_valid_batches_all = n_valid_images / batch_size
if (n_train_batches_all < batches2train) or (n_test_batches_all < batches2test) or (n_valid_batches_all < batches2validate): # You can't have so many batches.
print "... !! Dataset doens't have so many batches. "
raise AssertionError()
train_data_x, train_data_y = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'train' , height = height, width = width)
test_data_x, test_data_y = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'test' , height = height, width = width) # Load dataset for first epoch.
valid_data_x, valid_data_y = load_skdata_caltech101(batch_size = load_batches, rand_perm = rand_perm, batch = 1 , type_set = 'valid' , height = height, width = width) # Load dataset for first epoch.
train_set_x = theano.shared(train_data_x, borrow=True)
train_set_y = theano.shared(train_data_y, borrow=True)
test_set_x = theano.shared(test_data_x, borrow=True)
test_set_y = theano.shared(test_data_y, borrow=True)
valid_set_x = theano.shared(valid_data_x, borrow=True)
valid_set_y = theano.shared(valid_data_y, borrow=True)
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
multi_load = True
# Just checking as a way to see if the intended dataset is indeed loaded.
assert height*width*channels == train_set_x.get_value( borrow = True ).shape[1]
assert batch_size >= n_visual_images
if ada_grad is True:
assert rms_prop is False
elif rms_prop is True:
assert ada_grad is False
fudge_factor = rms_epsilon
######################
# BUILD NETWORK #
######################
print '... building the network'
start_time = time.clock()
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of [int]
if svm_flag is True:
y1 = T.matrix('y1') # [-1 , 1] labels in case of SVM
first_layer_input = x.reshape((batch_size, channels, height, width))
# Create first convolutional - pooling layers
activity = [] # to record Cnn activities
weights = []
conv_layers=[]
filt_size = filter_size[0]
pool_size = pooling_size[0]
if not nkerns == []:
conv_layers.append ( LeNetConvPoolLayer(
rng,
input = first_layer_input,
image_shape=(batch_size, channels , height, width),
filter_shape=(nkerns[0], channels , filt_size, filt_size),
poolsize=(pool_size, pool_size),
activation = cnn_activations[0],
verbose = verbose
) )
activity.append ( conv_layers[-1].output )
weights.append ( conv_layers[-1].filter_img)
# Create the rest of the convolutional - pooling layers in a loop
next_in_1 = ( height - filt_size + 1 ) / pool_size
next_in_2 = ( width - filt_size + 1 ) / pool_size
for layer in xrange(len(nkerns)-1):
filt_size = filter_size[layer+1]
pool_size = pooling_size[layer+1]
conv_layers.append ( LeNetConvPoolLayer(
rng,
input=conv_layers[layer].output,
image_shape=(batch_size, nkerns[layer], next_in_1, next_in_2),
filter_shape=(nkerns[layer+1], nkerns[layer], filt_size, filt_size),
poolsize=(pool_size, pool_size),
activation = cnn_activations[layer+1],
verbose = verbose
) )
next_in_1 = ( next_in_1 - filt_size + 1 ) / pool_size
next_in_2 = ( next_in_2 - filt_size + 1 ) / pool_size
weights.append ( conv_layers[-1].filter_img )
activity.append( conv_layers[-1].output )
# Assemble fully connected layers
if nkerns == []:
fully_connected_input = first_layer_input
else:
fully_connected_input = conv_layers[-1].output.flatten(2)
if len(dropout_rates) > 2 :
layer_sizes =[]
layer_sizes.append( nkerns[-1] * next_in_1 * next_in_2 )
for i in xrange(len(dropout_rates)-1):
layer_sizes.append ( num_nodes[i] )
layer_sizes.append ( outs )
elif len(dropout_rates) == 1:
layer_sizes = [ nkerns[-1] * next_in_1 * next_in_2, outs]
else :
layer_sizes = [ nkerns[-1] * next_in_1 * next_in_2, num_nodes[0] , outs]
assert len(layer_sizes) - 1 == len(dropout_rates) # Just checking.
""" Dropouts implemented from paper:
Srivastava, Nitish, et al. "Dropout: A simple way to prevent neural networks
from overfitting." The Journal of Machine Learning Research 15.1 (2014): 1929-1958.
"""
MLPlayers = MLP( rng=rng,
input=fully_connected_input,
layer_sizes=layer_sizes,
dropout_rates=dropout_rates,
activations=mlp_activations,
use_bias = use_bias,
svm_flag = svm_flag,
verbose = verbose)
# Build the expresson for the categorical cross entropy function.
if svm_flag is False:
cost = MLPlayers.negative_log_likelihood( y )
dropout_cost = MLPlayers.dropout_negative_log_likelihood( y )
else :
cost = MLPlayers.negative_log_likelihood( y1 )
dropout_cost = MLPlayers.dropout_negative_log_likelihood( y1 )
# create theano functions for evaluating the graphs
test_model = theano.function(
inputs=[index],
outputs=MLPlayers.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]})
validate_model = theano.function(
inputs=[index],
outputs=MLPlayers.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]})
prediction = theano.function(
inputs = [index],
outputs = MLPlayers.predicts,
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size]})
nll = theano.function(
inputs = [index],
outputs = MLPlayers.probabilities,
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size]})
# function to return activations of each image
activities = theano.function (
inputs = [index],
outputs = activity,
givens = {
x: train_set_x[index * batch_size: (index + 1) * batch_size]
})
# Compute cost and gradients of the model wrt parameter
params = []
for layer in conv_layers:
params = params + layer.params
params = params + MLPlayers.params
output = dropout_cost + l1_reg * MLPlayers.dropout_L1 + l2_reg * MLPlayers.dropout_L2 if dropout else cost + l1_reg * MLPlayers.L1 + l2_reg * MLPlayers.L2
gradients = []
for param in params:
gradient = T.grad( output ,param)
gradients.append ( gradient )
# TO DO: Try implementing Adadelta also.
# Compute momentum for the current epoch
epoch = T.scalar()
mom = ifelse(epoch <= mom_epoch_interval,
mom_start*(1.0 - epoch/mom_epoch_interval) + mom_end*(epoch/mom_epoch_interval),
mom_end)
# learning rate
eta = theano.shared(numpy.asarray(initial_learning_rate,dtype=theano.config.floatX))
# accumulate gradients for adagrad
grad_acc = []
for param in params:
eps = numpy.zeros_like(param.get_value(borrow=True), dtype=theano.config.floatX)
grad_acc.append(theano.shared(eps, borrow=True))
# accumulate velocities for momentum
velocities = []
for param in params:
velocity = theano.shared(numpy.zeros(param.get_value(borrow=True).shape,dtype=theano.config.floatX))
velocities.append(velocity)
# create updates for each combination of stuff
updates = OrderedDict()
print_flag = False
for velocity, gradient, acc , param in zip(velocities, gradients, grad_acc, params):
if ada_grad is True:
""" Adagrad implemented from paper:
John Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods
for online learning and stochastic optimization. JMLR
"""
current_acc = acc + T.sqr(gradient) # Accumulates Gradient
updates[acc] = current_acc # updates accumulation at timestamp
elif rms_prop is True:
""" Tieleman, T. and Hinton, G. (2012):
Neural Networks for Machine Learning, Lecture 6.5 - rmsprop.
Coursera. http://www.youtube.com/watch?v=O3sxAc4hxZU (formula @5:20)"""
current_acc = rms_rho * acc + (1 - rms_rho) * T.sqr(gradient)
updates[acc] = current_acc
else:
current_acc = 1
fudge_factor = 0
if mom_type == 0: # no momentum
updates[velocity] = -(eta / T.sqrt(current_acc + fudge_factor)) * gradient
#updates[velocity] = -1*eta*gradient
# perform adagrad velocity update
# this will be just added to parameters.
elif mom_type == 1: # if polyak momentum
""" Momentum implemented from paper:
Polyak, Boris Teodorovich. "Some methods of speeding up the convergence of iteration methods."
USSR Computational Mathematics and Mathematical Physics 4.5 (1964): 1-17.
Adapted from Sutskever, Ilya, Hinton et al. "On the importance of initialization and momentum in deep learning."
Proceedings of the 30th international conference on machine learning (ICML-13). 2013.
equation (1) and equation (2)"""
updates[velocity] = mom * velocity - (1.-mom) * ( eta / T.sqrt(current_acc+ fudge_factor)) * gradient
elif mom_type == 2: # Nestrov accelerated gradient beta stage...
"""Nesterov, Yurii. "A method of solving a convex programming problem with convergence rate O (1/k2)."
Soviet Mathematics Doklady. Vol. 27. No. 2. 1983.
Adapted from https://blogs.princeton.edu/imabandit/2013/04/01/acceleratedgradientdescent/
Instead of using past params we use the current params as described in this link
https://github.com/lisa-lab/pylearn2/pull/136#issuecomment-10381617,"""
updates[velocity] = mom * velocity - (1.-mom) * ( eta / T.sqrt(current_acc + fudge_factor)) * gradient
updates[param] = mom * updates[velocity]
else:
if print_flag is False:
print_flag = True
print "!! Unrecognized mometum type, switching to no momentum."
updates[velocity] = -( eta / T.sqrt(current_acc+ fudge_factor) ) * gradient
if mom_type != 2:
stepped_param = param + updates[velocity]
else:
stepped_param = param + updates[velocity] + updates[param]
if param.get_value(borrow=True).ndim == 2 and column_norm is True:
""" constrain the norms of the COLUMNs of the weight, according to
https://github.com/BVLC/caffe/issues/109 """
col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0))
desired_norms = T.clip(col_norms, 0, T.sqrt(squared_filter_length_limit))
scale = desired_norms / (1e-7 + col_norms)
updates[param] = stepped_param * scale
else:
updates[param] = stepped_param
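# Worked scalar sketch of the RMSProp accumulator coded above (illustrative
# numbers only): with rms_rho = 0.9, a previous accumulator acc = 0.04 and a
# gradient g = 0.3,
#
#     acc_new = 0.9 * 0.04 + 0.1 * 0.3 ** 2        # = 0.045
#     step = eta * 0.3 / (0.045 + 1e-6) ** 0.5     # ~ 1.414 * eta
#
# so the effective step size adapts to the running magnitude of the gradient.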
if svm_flag is True:
train_model = theano.function(inputs= [index, epoch],
outputs=output,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y1: train_set_y1[index * batch_size:(index + 1) * batch_size]},
on_unused_input='ignore'
)
else:
train_model = theano.function(inputs= [index, epoch],
outputs=output,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]},
on_unused_input='ignore'
)
decay_learning_rate = theano.function(
inputs=[],
outputs=eta, # Just updates the learning rates.
updates={eta: eta * learning_rate_decay}
)
momentum_value = theano.function (
inputs =[epoch],
outputs = mom,
)
end_time = time.clock()
# setting up visualization stuff...
shuffle_batch_ind = numpy.arange(batch_size)
numpy.random.shuffle(shuffle_batch_ind)
visualize_ind = shuffle_batch_ind[0:n_visual_images]
#visualize_ind = range(n_visual_images)
main_img_visual = True
# create all directories required for saving results and data.
if visualize_flag is True:
if not os.path.exists('../visuals'):
os.makedirs('../visuals')
if not os.path.exists('../visuals/activities'):
os.makedirs('../visuals/activities')
for i in xrange(len(nkerns)):
os.makedirs('../visuals/activities/layer_'+str(i))
if not os.path.exists('../visuals/filters'):
os.makedirs('../visuals/filters')
for i in xrange(len(nkerns)):
os.makedirs('../visuals/filters/layer_'+str(i))
if not os.path.exists('../visuals/images'):
os.makedirs('../visuals/images')
if not os.path.exists('../results/'):
os.makedirs ('../results')
print "... -> building complete, took " + str((end_time - start_time)) + " seconds"
###############
# TRAIN MODEL #
###############
#pdb.set_trace()
print "... training"
start_time = time.clock()
patience = numpy.inf # look at this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
this_validation_loss = []
best_validation_loss = numpy.inf
best_iter = 0
epoch_counter = 0
early_termination = False
cost_saved = []
best_params = None
iteration= 0
while (epoch_counter < n_epochs) and (not early_termination):
epoch_counter = epoch_counter + 1
for batch in xrange (batches2train):
if verbose is True:
print "... -> Epoch: " + str(epoch_counter) + " Batch: " + str(batch+1) + " out of " + str(batches2train) + " batches"
if multi_load is True:
iteration= (epoch_counter - 1) * n_train_batches * batches2train + batch
# Load data for this batch
if verbose is True:
print "... -> loading data for new batch"
if data_params["type"] == 'mat':
train_data_x, train_data_y, train_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'train')
elif data_params["type"] == 'skdata':
if dataset == 'caltech101':
train_data_x, train_data_y = load_skdata_caltech101(batch_size = load_batches, batch = batch + 1 , type_set = 'train', rand_perm = rand_perm, height = height, width = width )
# Do not use svm_flag for caltech 101
train_set_x.set_value(train_data_x ,borrow = True)
train_set_y.set_value(train_data_y ,borrow = True)
for minibatch_index in xrange(n_train_batches):
if verbose is True:
print "... -> Mini Batch: " + str(minibatch_index + 1) + " out of " + str(n_train_batches)
cost_ij = train_model( minibatch_index, epoch_counter)
cost_saved = cost_saved +[cost_ij]
else:
iteration= (epoch_counter - 1) * n_train_batches + batch
cost_ij = train_model(batch, epoch_counter)
cost_saved = cost_saved +[cost_ij]
if epoch_counter % validate_after_epochs == 0:
# Load Validation Dataset here.
validation_losses = 0.
if multi_load is True:
# Load data for this batch
for batch in xrange ( batches2test ):
if data_params["type"] == 'mat':
valid_data_x, valid_data_y, valid_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'valid')
elif data_params["type"] == 'skdata':
if dataset == 'caltech101':
valid_data_x, valid_data_y = load_skdata_caltech101(batch_size = load_batches, batch = batch + 1 , type_set = 'valid' , rand_perm = rand_perm, height = height, width = width )
# Do not use svm_flag for caltech 101
valid_set_x.set_value(valid_data_x,borrow = True)
valid_set_y.set_value(valid_data_y,borrow = True)
validation_losses = validation_losses + numpy.sum([[validate_model(i) for i in xrange(n_valid_batches)]])
this_validation_loss = this_validation_loss + [validation_losses]
if verbose is True:
if this_validation_loss[-1] < best_validation_loss :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(numpy.mean(cost_saved[-1*n_train_batches:])) +", validation accuracy :" + str(float( batch_size * n_valid_batches * batches2validate - this_validation_loss[-1])*100/(batch_size*n_valid_batches*batches2validate)) + "%, learning_rate = " + str(eta.get_value(borrow=True))+ ", momentum = " +str(momentum_value(epoch_counter)) + " -> best thus far "
else :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(numpy.mean(cost_saved[-1*n_train_batches:])) +", validation accuracy :" + str(float( batch_size * n_valid_batches * batches2validate - this_validation_loss[-1])*100/(batch_size*n_valid_batches*batches2validate)) + "%, learning_rate = " + str(eta.get_value(borrow=True)) + ", momentum = " +str(momentum_value(epoch_counter))
else:
if this_validation_loss[-1] < best_validation_loss :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(numpy.mean(cost_saved[-1*n_train_batches:])) +", validation accuracy :" + str(float( batch_size * n_valid_batches * batches2validate - this_validation_loss[-1])*100/(batch_size*n_valid_batches*batches2validate)) + "% -> best thus far "
else :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(numpy.mean(cost_saved[-1*n_train_batches:])) +", validation accuracy :" + str(float( batch_size * n_valid_batches * batches2validate - this_validation_loss[-1])*100/(batch_size*n_valid_batches*batches2validate)) + "%"
else:
validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
this_validation_loss = this_validation_loss + [numpy.sum(validation_losses)]
if verbose is True:
if this_validation_loss[-1] < best_validation_loss :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(cost_saved[-1]) +", validation accuracy :" + str(float(batch_size*n_valid_batches - this_validation_loss[-1])*100/(batch_size*n_valid_batches)) + "%, learning_rate = " + str(eta.get_value(borrow=True)) + ", momentum = " +str(momentum_value(epoch_counter)) + " -> best thus far "
else:
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(cost_saved[-1]) +", validation accuracy :" + str(float(batch_size*n_valid_batches - this_validation_loss[-1])*100/(batch_size*n_valid_batches)) + "%, learning_rate = " + str(eta.get_value(borrow=True)) + ", momentum = " +str(momentum_value(epoch_counter))
else:
if this_validation_loss[-1] < best_validation_loss :
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(cost_saved[-1]) +", validation accuracy :" + str(float(batch_size*n_valid_batches - this_validation_loss[-1])*100/(batch_size*n_valid_batches)) + "% -> best thus far "
else:
print "... -> epoch " + str(epoch_counter) + ", cost: " + str(cost_saved[-1]) +", validation accuracy :" + str(float(batch_size*n_valid_batches - this_validation_loss[-1])*100/(batch_size*n_valid_batches)) + "% "
#improve patience if loss improvement is good enough
if this_validation_loss[-1] < best_validation_loss * \
improvement_threshold:
patience = max(patience, iteration* patience_increase)
best_iter = iteration
best_validation_loss = min(best_validation_loss, this_validation_loss[-1])
new_learning_rate = decay_learning_rate()
if visualize_flag is True:
if epoch_counter % visualize_after_epochs == 0:
# saving down images.
if main_img_visual is False:
for i in xrange(n_visual_images):
curr_img = numpy.asarray(numpy.reshape(train_set_x.get_value( borrow = True )[visualize_ind[i]],[height, width, channels] ) * 255., dtype='uint8' )
if verbose is True:
cv2.imshow("Image Number " +str(i) + "_label_" + str(train_set_y.eval()[visualize_ind[i]]), curr_img)
cv2.imwrite("../visuals/images/image_" + str(i)+ "_label_" + str(train_set_y.eval()[visualize_ind[i]]) + ".jpg", curr_img )
main_img_visual = True
# visualizing activities.
activity = activities(0)
for m in xrange(len(nkerns)): #For each layer
loc_ac = '../visuals/activities/layer_' + str(m) + "/epoch_" + str(epoch_counter) +"/"
if not os.path.exists(loc_ac):
os.makedirs(loc_ac)
current_activity = activity[m]
for i in xrange(n_visual_images): # for each randomly chosen image .. visualize its activity
visualize(current_activity[visualize_ind[i]], loc = loc_ac, filename = 'activity_' + str(i) + "_label_" + str(train_set_y.eval()[visualize_ind[i]]) +'.jpg' , show_img = display_flag)
# visualizing the filters.
for m in xrange(len(nkerns)):
if m == 0: # first layer outputs.
if channels == 3: # if the image is color, then first layer looks at color pictures and I can visualize the filters also as color.
curr_image = weights[m].eval()
if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
visualize_color_filters(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename = 'kernel_0.jpg' , show_img = display_flag)
else: # visualize them as grayscale images.
for i in xrange(weights[m].shape.eval()[1]):
curr_image = weights[m].eval() [:,i,:,:]
if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
visualize(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename = 'kernel_' + str(i) + '.jpg' , show_img = display_flag)
else:
for i in xrange(nkerns[m-1]):
curr_image = weights[m].eval()[:,i,:,:]
if not os.path.exists('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter)):
os.makedirs('../visuals/filters/layer_'+str(m)+'/epoch_'+str(epoch_counter))
visualize(curr_image, loc = '../visuals/filters/layer_' + str(m) + '/' + 'epoch_' + str(epoch_counter) + '/' , filename = 'kernel_' + str(i) + '.jpg' , show_img = display_flag)
if patience <= iteration:
early_termination = True
break
save_network( 'network.pkl.gz', params, arch_params, data_params )
end_time = time.clock()
print "... training complete, took " + str((end_time - start_time)/ 60.) +" minutes"
###############
# TEST MODEL #
###############
start_time = time.clock()
print "... testing"
wrong = 0
predictions = []
class_prob = []
labels = []
if multi_load is False:
labels = test_set_y.eval().tolist()
for mini_batch in xrange(batches2test):
#print ".. Testing batch " + str(mini_batch)
wrong = wrong + int(test_model(mini_batch))
predictions = predictions + prediction(mini_batch).tolist()
class_prob = class_prob + nll(mini_batch).tolist()
print "... -> Total test accuracy : " + str(float((batch_size*n_test_batches)-wrong )*100/(batch_size*n_test_batches)) + " % out of " + str(batch_size*n_test_batches) + " samples."
else:
for batch in xrange(batches2test):
print ".. Testing batch " + str(batch)
# Load data for this batch
if data_params["type"] == 'mat':
test_data_x, test_data_y, test_data_y1 = load_data_mat(dataset, batch = batch + 1 , type_set = 'test')
elif data_params["type"] == 'skdata':
if dataset == 'caltech101':
test_data_x, test_data_y = load_skdata_caltech101(batch_size = load_batches, batch = batch + 1 , type_set = 'test', rand_perm = rand_perm, height = height, width = width )
test_set_x.set_value(test_data_x,borrow = True)
test_set_y.set_value(test_data_y,borrow = True)
labels = labels + test_set_y.eval().tolist()
for mini_batch in xrange(n_test_batches):
wrong = wrong + int(test_model(mini_batch))
predictions = predictions + prediction(mini_batch).tolist()
class_prob = class_prob + nll(mini_batch).tolist()
print "... -> Total test accuracy : " + str(float((batch_size*n_test_batches*batches2test)-wrong )*100/(batch_size*n_test_batches*batches2test)) + " % out of " + str(batch_size*n_test_batches*batches2test) + " samples."
end_time = time.clock()
correct = 0
confusion = numpy.zeros((outs,outs), dtype = int)
for index in xrange(len(predictions)):
if labels[index] == predictions[index]:
correct = correct + 1
confusion[int(predictions[index]),int(labels[index])] = confusion[int(predictions[index]),int(labels[index])] + 1
# Save down data
f = open(results_file_name, 'w')
for i in xrange(len(predictions)):
f.write(str(i))
f.write("\t")
f.write(str(labels[i]))
f.write("\t")
f.write(str(predictions[i]))
f.write("\t")
for j in xrange(outs):
f.write(str(class_prob[i][j]))
f.write("\t")
f.write('\n')
f.close()
f = open(error_file_name,'w')
for i in xrange(len(this_validation_loss)):
f.write(str(this_validation_loss[i]))
f.write("\n")
f.close()
f = open(cost_file_name,'w')
for i in xrange(len(cost_saved)):
f.write(str(cost_saved[i]))
f.write("\n")
f.close()
f = open(confusion_file_name, 'w')
f.write(str(confusion))
f.close()
save_network( network_save_name, params, arch_params, data_params )
end_time = time.clock()
print "Testing complete, took " + str((end_time - start_time)/ 60.) + " minutes"
print "Confusion Matrix with accuracy : " + str(float(correct)/len(predictions)*100)
print confusion
print "Done"
pdb.set_trace()
#################
# Boiler PLate #
#################
## Boiler Plate ##
if __name__ == '__main__':
import sys
# for epoch in [0, mom_epoch_interval] the momentum increases linearly
optimization_params = {
"mom_start" : 0.5, # from mom_start to mom_end. After mom_epoch_interval, it stay at mom_end
"mom_end" : 0.98,
"mom_interval" : 100,
"mom_type" : 1, # if mom_type = 1 , classical momentum if mom_type = 0, no momentum, if mom_type = 2 Nesterov's accelerated gradient momentum
"initial_learning_rate" : 0.01, # Learning rate at the start
"learning_rate_decay" : 0.9998,
"l1_reg" : 0.0001, # regularization coeff for the last logistic layer and MLP layers
"l2_reg" : 0.0001, # regularization coeff for the last logistic layer and MLP layers
"ada_grad" : False,
"rms_prop" : True,
"rms_rho" : 0.9, # implement rms_prop with this rho
"rms_epsilon" : 1e-6, # implement rms_prop with this epsilon
"fudge_factor" : 1e-7, # Just to avoid divide by zero, but google even advocates trying '1'
}
filename_params = {
"results_file_name" : "../results/results_mnist_rotated_bg.txt", # Files that will be saved down on completion Can be used by the parse.m file
"error_file_name" : "../results/error_mnist_rotated_bg.txt",
"cost_file_name" : "../results/cost_mnist_rotated_bg.txt",
"confusion_file_name" : "../results/confusion_mnist_rotated_bg.txt",
"network_save_name" : "../results/mnist_rotated_bg.pkl.gz "
}
data_params = {
"type" : 'skdata', # Options: 'pkl', 'skdata' , 'mat' for loading pkl files, mat files for skdata files.
"loc" : 'mnist_rotated_bg', # location for mat or pkl files, which data for skdata files. Skdata will be downloaded and used from '~/.skdata/'
"batch_size" : 500, # For loading and for Gradient Descent Batch Size
"load_batches" : -1,
"batches2train" : 80, # Number of training batches.
"batches2test" : 24, # Number of testing batches.
"batches2validate" : 20, # Number of validation batches
"height" : 28, # Height of each input image
"width" : 28, # Width of each input image
"channels" : 1 # Number of channels of each input image
}
arch_params = {
# Decay of Learninig rate after each epoch of SGD
"squared_filter_length_limit" : 15,
"n_epochs" : 200, # Total Number of epochs to run before completion (no premature completion)
"validate_after_epochs" : 1, # After how many iterations to calculate validation set accuracy ?
"mlp_activations" : [ ReLU ], # Activations of MLP layers Options: ReLU, Sigmoid, Tanh
"cnn_activations" : [ ReLU, ReLU ,ReLU], # Activations for CNN layers Options: ReLU,
"dropout" : True, # Flag for dropout / backprop
"column_norm" : True,
"dropout_rates" : [ 0.5, 0.5 ], # Rates of dropout. Use 0 is backprop.
"nkerns" : [ 20 , 50, 50 ], # Number of feature maps at each CNN layer
"outs" : 10, # Number of output nodes ( must equal number of classes)
"filter_size" : [ 5 , 5 , 5 ], # Receptive field of each CNN layer
"pooling_size" : [ 1 , 2 , 2 ], # Pooling field of each CNN layer
"num_nodes" : [ 450 ], # Number of nodes in each MLP layer
"use_bias" : True, # Flag for using bias
"random_seed" : 23455, # Use same seed for reproduction of results.
"svm_flag" : False # True makes the last layer a SVM
}
visual_params = {
"visualize_flag" : True,
"visualize_after_epochs": 1,
"n_visual_images" : 20,
"display_flag" : False
}
run_cnn(
arch_params = arch_params,
optimization_params = optimization_params,
data_params = data_params,
filename_params = filename_params,
visual_params = visual_params,
verbose = False, # True prints a lot of intermediate steps, False keeps it to a minimum.
)
| mit |
Akshay0724/scikit-learn | sklearn/manifold/locally_linear.py | 19 | 25916 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.extmath import stable_cumsum
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
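# Illustrative sketch with assumed toy inputs: a sample lying exactly midway
# between its two neighbours gets equal weights that sum to one.
#
#     import numpy as np
#     X = np.array([[0.5, 0.0]])                  # one sample, two features
#     Z = np.array([[[0.0, 0.0], [1.0, 0.0]]])    # its two neighbours
#     barycenter_weights(X, Z)                    # -> approximately [[0.5, 0.5]]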
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=1):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
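# Illustrative sketch with assumed random data: each row of the returned CSR
# matrix holds the barycenter weights of that sample's n_neighbors nearest
# neighbours, so every row sums to (approximately) one.
#
#     import numpy as np
#     X = np.random.RandomState(0).rand(10, 3)
#     W = barycenter_kneighbors_graph(X, n_neighbors=3)
#     W.shape                              # (10, 10)
#     np.asarray(W.sum(axis=1)).ravel()    # all entries close to 1.0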
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None, n_jobs=1):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I: M = W'W - W' - W + I = (I - W)'(I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float64)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1] *
U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = stable_cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h) +
(1 - alpha_i) * w_reg[i, :, None])
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
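# --- Illustrative sketch (not part of the original module) ---------------
# Hedged usage example for the function above on synthetic data; the shapes
# follow the documented contract: an (n_samples, n_components) embedding
# plus its reconstruction error.
def _lle_function_demo():
    rng = np.random.RandomState(42)
    X_demo = rng.rand(80, 3)                     # synthetic 3-D point cloud
    Y_demo, err = locally_linear_embedding(
        X_demo, n_neighbors=10, n_components=2, method='standard')
    # Y_demo.shape == (80, 2)
    return Y_demo, err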
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
random_state = check_random_state(self.random_state)
X = check_array(X, dtype=float)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg, n_jobs=self.n_jobs)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
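# --- Illustrative sketch (not part of the original module) ---------------
# Hedged example of the estimator API documented above: fit on synthetic
# training points, then map previously unseen points with `transform`.
def _lle_estimator_demo():
    rng = np.random.RandomState(0)
    X_train = rng.rand(60, 3)
    X_new = rng.rand(5, 3)
    lle = LocallyLinearEmbedding(n_neighbors=8, n_components=2)
    embedding = lle.fit_transform(X_train)       # shape (60, 2)
    projected = lle.transform(X_new)             # shape (5, 2)
    return embedding, projected, lle.reconstruction_error_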
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/linear_model/__init__.py | 268 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
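# --- Illustrative sketch (not part of the original package) ---------------
# A hedged, minimal example of the estimators re-exported above; the data
# and the hyper-parameter are synthetic/arbitrary.
def _linear_model_demo():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0.0, 1.1, 1.9, 3.2])
    model = Ridge(alpha=1.0).fit(X, y)
    return model.predict(np.array([[4.0]]))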
| bsd-3-clause |
jmargeta/scikit-learn | examples/linear_model/plot_sgd_iris.py | 4 | 2174 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
pl.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=pl.cm.Paired)
pl.title("Decision surface of multi-class SGD")
pl.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
pl.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
pl.legend()
pl.show()
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/utils/extmath.py | 4 | 13622 | """
Extended math utilities.
"""
# Authors: G. Varoquaux, A. Gramfort, A. Passos, O. Grisel
# License: BSD
import warnings
import numpy as np
from scipy import linalg
from . import check_random_state
from .fixes import qr_economic
from ..externals.six.moves import xrange
def norm(v):
v = np.asarray(v)
__nrm2, = linalg.get_blas_funcs(['nrm2'], [v])
return __nrm2(v)
def _fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(np.linalg.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
# XXX: Should be implemented as in numpy, using ATLAS
# http://projects.scipy.org/numpy/browser/ \
# trunk/numpy/linalg/linalg.py#L1559
ld = np.sum(np.log(np.diag(A)))
a = np.exp(ld / A.shape[0])
d = np.linalg.det(A / a)
ld += np.log(d)
if not np.isfinite(ld):
return -np.inf
return ld
def _fast_logdet_numpy(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
# Numpy >= 1.5 provides a fast logdet
if hasattr(np.linalg, 'slogdet'):
fast_logdet = _fast_logdet_numpy
else:
fast_logdet = _fast_logdet
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly"""
from scipy import sparse
if sparse.issparse(a) or sparse.issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return np.dot(a, b)
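# --- Illustrative sketch (not part of the original module) ---------------
# Hedged example: with two sparse operands the product stays sparse unless
# dense_output=True, in which case it is densified via toarray().
def _safe_sparse_dot_demo():
    from scipy import sparse
    A = sparse.csr_matrix(np.eye(3))
    B = sparse.csr_matrix(np.arange(9.0).reshape(3, 3))
    return safe_sparse_dot(A, B, dense_output=True)   # a plain ndarray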
def randomized_range_finder(A, size, n_iter, random_state=None,
n_iterations=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
        An (A.shape[0] x size) matrix with orthonormal columns, the range of
        which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
if n_iterations is not None:
warnings.warn("n_iterations was renamed to n_iter for consistency "
"and will be removed in 0.16.", DeprecationWarning)
n_iter = n_iterations
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = qr_economic(Y)
return Q
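# --- Illustrative sketch (not part of the original module) ---------------
# Hedged example for the range finder above: on a synthetic low-rank matrix
# the returned Q has orthonormal columns and projecting onto its range
# recovers the input (up to floating point error).
def _randomized_range_finder_demo():
    rng = np.random.RandomState(0)
    A = np.dot(rng.rand(40, 3), rng.rand(3, 20))      # rank-3 synthetic matrix
    Q = randomized_range_finder(A, size=5, n_iter=2, random_state=0)
    orthonormal = np.allclose(np.dot(Q.T, Q), np.eye(Q.shape[1]))
    recovered = np.allclose(np.dot(Q, np.dot(Q.T, A)), A)
    return orthonormal, recovered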
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0,
n_iterations=None):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
if n_iterations is not None:
warnings.warn("n_iterations was renamed to n_iter for consistency "
"and will be removed in 0.16.", DeprecationWarning)
n_iter = n_iterations
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, s, V = svd_flip(U, s, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
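# --- Illustrative sketch (not part of the original module) ---------------
# Hedged example: on a synthetic, exactly rank-5 matrix the leading singular
# values found by randomized_svd should closely match the exact SVD.
def _randomized_svd_demo():
    rng = np.random.RandomState(0)
    M_demo = np.dot(rng.rand(50, 5), rng.rand(5, 30))  # rank-5 matrix
    U, s, V = randomized_svd(M_demo, n_components=5, n_iter=3, random_state=0)
    s_exact = linalg.svd(M_demo, compute_uv=False)[:5]
    return np.allclose(s, s_exact)                     # expected: True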
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
    # the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> from numpy import *
>>> a = random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> allclose(a, dot(a, dot(B, a)))
True
>>> allclose(B, dot(B, dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
References
----------
http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
"""
arrays = [np.asarray(x).ravel() for x in arrays]
dtype = arrays[0].dtype
n = np.prod([x.size for x in arrays])
if out is None:
out = np.empty([n, len(arrays)], dtype=dtype)
    m = n // arrays[0].size
out[:, 0] = np.repeat(arrays[0], m)
if arrays[1:]:
cartesian(arrays[1:], out=out[0:m, 1:])
for j in xrange(1, arrays[0].size):
out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
return out
def svd_flip(u, s, v):
"""Sign correction to ensure deterministic output from SVD
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, s, v: arrays,
The output of `linalg.svd` or `sklearn.utils.extmath.randomized_svd`,
with matching inner dimensions so one can compute `np.dot(u * s, v)`.
Returns
-------
u_adjusted, s, v_adjusted: arrays with the same dimensions as the input.
"""
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
return u, s, v
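# --- Illustrative sketch (not part of the original module) ---------------
# Hedged example: whatever signs linalg.svd happens to return, svd_flip
# makes the largest-magnitude loading of each u column positive while the
# reconstruction np.dot(u * s, v) is unchanged.
def _svd_flip_demo():
    rng = np.random.RandomState(0)
    A = rng.randn(6, 4)
    u, s, v = linalg.svd(A, full_matrices=False)
    u_f, s_f, v_f = svd_flip(u.copy(), s, v.copy())
    same_product = np.allclose(np.dot(u_f * s_f, v_f), A)
    positive_peaks = np.all(
        u_f[np.argmax(np.abs(u_f), axis=0), np.arange(u_f.shape[1])] > 0)
    return same_product, positive_peaks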
| bsd-3-clause |
hassaku/deformable-supervised-nmf | deformable_supervised_nmf.py | 1 | 10076 | #!/usr/bin/env python
# coding: utf-8
import sys
import numpy as np
import sklearn.metrics as sk_metrics
from sklearn.base import BaseEstimator, TransformerMixin
class DeformableSupervisedNMF(BaseEstimator, TransformerMixin):
"""
Reference:
http://www.slideshare.net/DaichiKitamura/nmf-in-japanese
"""
def __init__(self,
supervised_components_list=None, unknown_componets=None,
supervised_max_iter_list=[], unknown_max_iter=100,
eta=0.1, mu_list=[0, 0, 0, 0],
X_list=[], progress=False):
"""
:param supervised_components_list:
:param unknown_componets:
:param supervised_max_iter_list:
:param unknown_max_iter:
:param eta: rate of deforming. default is 0.1.
:param mu_list: penalty coefficients between following features
0: supervised and deformed
1: supervised and unknown
2: deformed and unknown
3: (supervised + deformed) and unknown
:param X_list:
:param progress:
:return:
"""
if type(supervised_components_list) is not list:
print "supervised_components_list must be a list."
sys.exit(-1)
if len(supervised_max_iter_list) == 0:
supervised_max_iter_list = [100] * len(supervised_components_list)
self.__supervised_components_list = supervised_components_list
self.__unknown_components = unknown_componets
self.__supervised_max_iter_list = supervised_max_iter_list
self.__unknown_max_iter = unknown_max_iter
self.mu_list = mu_list
self.eta = eta
self.__eps = 1e-8 # for avoidance of division by zero
self.__tolerance = 1e-8 # for stopping iteration
self.progress = progress
self.supervised_features_list = self.__fit_supervised(X_list, max_iter_list=supervised_max_iter_list)
self.unknown_features = None
def fit_transform(self, X, y=None):
return self.__fit_deformable(X)
def fit(self, X, y=None, **params):
self.fit_transform(X, y)
return self
def __debug(self, msg):
if self.progress:
print(msg)
def __fit_supervised(self, X_list, max_iter_list):
supervised_features_list = []
for xi, xdata in enumerate(X_list):
xdata = np.mat(xdata)
T, D = xdata.shape
X_abs = abs(xdata)
H0 = np.mat(np.random.rand(self.__supervised_components_list[xi], D))
U0 = np.mat(np.random.rand(T, self.__supervised_components_list[xi]))
for curr_iter in range(max_iter_list[xi]):
update_U = (H0 * X_abs.T / (self.__eps + (H0 * H0.T) * U0.T)).T
U = np.multiply(U0, update_U)
U0 = U
update_H = (X_abs.T * U / (self.__eps + H0.T * (U.T * U))).T
H = np.multiply(H0, update_H)
H0 = H
rse = np.sqrt(sk_metrics.mean_squared_error(X_abs, U*H))
max_update = np.max(np.max(update_H))
if max_update < self.__tolerance:
self.__debug("Finished (max_update: %f)" % max_update)
break
self.__debug("%d: %f" % (curr_iter, rse))
supervised_features_list.append(H0)
return supervised_features_list
def __fit_deformable(self, X):
Z = np.mat(abs(X)).T
dims, samples = Z.shape
supervised_features = np.vstack(self.supervised_features_list)
supervised_features_dims = supervised_features.shape[0]
F = np.array(abs(supervised_features)).T
D = np.array(np.random.rand(dims, supervised_features_dims))
H = np.array(np.random.rand(dims, self.__unknown_components))
G = np.array(np.random.rand(supervised_features_dims, samples))
U = np.array(np.random.rand(self.__unknown_components, samples))
for it in range(self.__unknown_max_iter):
D, H, G, U, rse, update_value = self.__update(Z, F, D, H, G, U,
mu=self.mu_list, eta=self.eta, it=it)
self.__debug("%d: F+D: %f, H: %f, G: %f, U:%f rse:%f" %
(it, (F+D).mean(), H.mean(), G.mean(), U.mean(), rse))
if update_value < self.__tolerance:
break
bias = 0
for ci, components in enumerate(self.__supervised_components_list):
self.supervised_features_list[ci] = supervised_features[bias:components+bias, :]
bias += components
self.unknown_features = np.dot(np.mat(X).T - np.dot(supervised_features.T, G), U.I).T
self.__debug("Finished (last update value: %f)" % update_value)
return self.supervised_features_list + [self.unknown_features]
def __update(self, Z, F, D, H, G, U, mu, eta=0.3, it=None):
dims, samples = Z.shape
F = np.mat(F)
D = np.mat(D)
H = np.mat(H)
G = np.mat(G)
U = np.mat(U)
V1 = 2 * mu[0] * np.dot(F, np.sum(np.multiply(F, D), axis=0).T)
V2 = 2 * mu[2] * np.dot(H, np.dot(D.T, H).T)
V = np.tile(V1, F.shape[1]) + V2
# update D ###########################
R = np.dot(F+D, G) + np.dot(H, U)
## D plus
D_numer1 = (eta*F + D)
D_numer2 = np.dot(np.divide(Z, R + self.__eps), G.T)
D_numer = np.multiply(D_numer1, D_numer2)
D_denom1 = np.tile(np.sum(G, axis=1), dims).T
D_denom2 = 2 * mu[1] * np.dot(H, (np.dot((F + D).T, H)).T)
D_denom = D_denom1 + V + D_denom2
D_plus = np.divide(D_numer, D_denom + self.__eps) - eta * F
## D minus
D_numer2 -= V
D_numer = np.multiply(D_numer1, D_numer2)
D_denom = D_denom1 + D_denom2
D_minus = np.divide(D_numer, D_denom + self.__eps) - eta * F
D0 = np.mat(np.zeros((D.shape[0], D.shape[1])))
D0[V >= 0] = np.array(D_plus)[V >= 0]
D0[V < 0] = np.array(D_minus)[V < 0]
D = D0
R = np.dot(F+D, G) + np.dot(H, U)
# update H ###########################
W = 2 * mu[2] * np.dot(D, np.dot(D.T, H))
S = 2 * mu[3] * np.dot(F + D, np.dot((F + D).T, H))
## H plus
H_numer1 = H
H_numer2 = np.dot(np.divide(Z, R + self.__eps), U.T)
H_numer = np.multiply(H_numer1, H_numer2)
H_denom1 = np.tile(np.sum(U, axis=1), dims).T
H_denom2 = 2 * mu[1] * np.dot(F, np.dot(F.T, H))
H_denom = H_denom1 + H_denom2 + W + S
H_plus = np.divide(H_numer, H_denom + self.__eps)
## H minus
H_numer2 -= W
H_numer = np.multiply(H_numer1, H_numer2)
H_denom = H_denom1 + H_denom2 + S
H_minus = np.divide(H_numer, H_denom + self.__eps)
H0 = np.mat(np.zeros((H.shape[0], H.shape[1])))
H0[W >= 0] = np.array(H_plus)[W >= 0]
H0[W < 0] = np.array(H_minus)[W < 0]
update_value = np.nanmax(np.nanmax(np.divide(H0, H + self.__eps)))
H = H0
R = np.dot(F+D, G) + np.dot(H, U)
# update G ###########################
G_numer1 = np.dot((F + D).T, np.divide(Z, R + self.__eps))
G_numer = np.multiply(G, G_numer1)
G_denom = np.tile(np.sum(F + D, axis=0).T, samples)
G0 = np.divide(G_numer, G_denom + self.__eps)
# update U ###########################
U_numer1 = np.dot(H.T, np.divide(Z, R + self.__eps))
U_numer = np.multiply(U, U_numer1)
U_denom = np.tile(np.sum(H, axis=0).T, samples)
U0 = np.divide(U_numer, U_denom + self.__eps)
# reconstruction err #################
rse = np.sqrt(sk_metrics.mean_squared_error(Z, (F + D0)*G0 + H0*U0))
return D0, H0, G0, U0, rse, update_value
if __name__ == "__main__":
import matplotlib.pyplot as plt
from scipy import signal
# Generate sample data
np.random.seed(0)
n_samples = 200
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1: sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2: square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
def __format_data(x):
y = np.tile(x, [5, 1])
y += 0.2 * np.random.normal(size=y.shape) # Add noise
y /= y.std(axis=0)
y -= y.min()
return y
tx1 = __format_data(s1)
tx2 = __format_data(s2)
ux = __format_data(s3)
dsnmf = DeformableSupervisedNMF(supervised_components_list=[5, 5], unknown_componets=5,
supervised_max_iter_list=[1000]*2, unknown_max_iter=5000,
eta=0.01, mu_list=[0.01, 0.01, 0.01, 0.01],
X_list=[tx1, tx2], progress=True)
dx1 = 1.2 * __format_data(s1)
dx2 = 0.8 * __format_data(s2)
fx1, fx2, fx3 = dsnmf.fit_transform(dx1 + dx2 + ux)
plt.rcParams.update({'axes.titlesize': 'small'})
plt.subplot(434)
plt.plot(tx1.T)
plt.tick_params(labelbottom='off')
plt.title('pretrained signals')
plt.subplot(4, 3, 7)
plt.plot(tx2.T)
plt.tick_params(labelbottom='off')
plt.subplot(4, 3, 3)
plt.plot((dx1 + dx2 + ux).T)
plt.tick_params(labelbottom='off')
plt.title('input signals')
plt.subplot(4, 3, 5)
plt.plot(dx1.T)
plt.tick_params(labelbottom='off')
plt.title('truth signals')
plt.subplot(4, 3, 8)
plt.plot(dx2.T)
plt.tick_params(labelbottom='off')
plt.subplot(4, 3, 11)
plt.plot(ux.T)
plt.title('(unknown)')
plt.tick_params(labelbottom='off')
plt.subplot(4, 3, 6)
plt.plot(fx1.T)
plt.tick_params(labelbottom='off')
plt.title('decomposing signals')
plt.subplot(4, 3, 9)
plt.plot(fx2.T)
plt.tick_params(labelbottom='off')
plt.subplot(4, 3, 12)
plt.plot(fx3.T)
plt.tick_params(labelbottom='off')
plt.show()
| mit |
ithallojunior/NN_compare | xor_examples/xor_class.py | 2 | 1033 | #Example with a Classifier using the scikit-learn library
#example for the XOr gate
#It shows how to save the network to a file too
from sklearn.neural_network import MLPClassifier
import pickle
X = [[0., 0.],[0., 1.], [1., 0.], [1., 1.]] # each one of the entries 00 01 10 11
y = [0, 1, 1, 0] # outputs for each one of the entries
clf = MLPClassifier(algorithm='l-bfgs', alpha=1e-5, hidden_layer_sizes=(10), random_state=1, verbose=True, max_iter=1000)
clf.fit(X, y)
outp = clf.predict([[0., 0.],[0., 1.], [1., 0.], [1., 1.]])
print'Results:'
print '0 0:', outp[0]
print '0 1:', outp[1]
print '1 0:', outp[2]
print '1 1:', outp[3]
print'Score:', clf.score(X, y)
#saving into file for later usage
f = open('data.ann', 'w')
mem = pickle.dumps(clf)
f.write(mem)
f.close()
f = open('data.ann', 'r')
nw = f.read()
mem = pickle.loads(nw)
outp = mem.predict([[1., 1.],[1., 0.], [0., 1.], [0., 0.]])
print'Results:'
print '0 0:', outp[3]
print '0 1:', outp[2]
print '1 0:', outp[1]
print '1 1:', outp[0]
print'Score:', clf.score(X, y)
| mit |
woobe/h2o | py/testdir_hosts_fvec/test_exec_import_hosts_bigfiles.py | 2 | 4497 | import unittest
import random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
zeroList = [
'Result0 = 0',
]
exprList = [
'Result<n>.hex = log(<keyX>[<col1>])',
'Result<n>.hex = randomBitVector(19,0,123) + Result<n-1>.hex',
'Result<n>.hex = randomFilter(<keyX>,<col1>,<row>)',
'Result<n>.hex = factor(<keyX>[<col1>])',
'Result<n>.hex = slice(<keyX>[<col1>],<row>)',
'Result<n>.hex = colSwap(<keyX>,<col1>,(<keyX>[2]==0 ? 54321 : 54321))',
'Result<n>.hex = <keyX>[<col1>]',
'Result<n>.hex = min(<keyX>[<col1>])',
'Result<n>.hex = max(<keyX>[<col1>]) + Result<n-1>.hex',
'Result<n>.hex = mean(<keyX>[<col1>]) + Result<n-1>.hex',
'Result<n>.hex = sum(<keyX>[<col1>]) + Result.hex',
]
def exec_list(exprList, lenNodes, csvFilename, hex_key):
h2e.exec_zero_list(zeroList)
# start with trial = 1 because trial-1 is used to point to Result0 which must be initted
trial = 1
while (trial < 100):
for exprTemplate in exprList:
# do each expression at a random node, to facilate key movement
nodeX = random.randint(0,lenNodes-1)
colX = random.randint(1,54)
# FIX! should tune this for covtype20x vs 200x vs covtype.data..but for now
row = str(random.randint(1,400000))
execExpr = h2e.fill_in_expr_template(exprTemplate, colX, trial, row, hex_key)
execResultInspect = h2e.exec_expr(h2o.nodes[nodeX], execExpr,
resultKey="Result"+str(trial)+".hex", timeoutSecs=60)
eri0 = execResultInspect[0]
eri1 = execResultInspect[1]
columns = eri0.pop('cols')
columnsDict = columns[0]
print "\nexecResult columns[0]:", h2o.dump_json(columnsDict)
print "\nexecResult [0]:", h2o.dump_json(eri0)
print "\nexecResult [1] :", h2o.dump_json(eri1)
min = columnsDict["min"]
h2o.verboseprint("min: ", min, "trial:", trial)
### self.assertEqual(float(min), float(trial),"what can we check here")
### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
# slows things down to check every iteration, but good for isolation
h2o.check_sandbox_for_errors()
print "Trial #", trial, "completed\n"
trial += 1
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec_import_hosts_bigfiles(self):
# just do the import folder once
timeoutSecs = 4000
# "covtype169x.data",
# "covtype.13x.shuffle.data",
# "3G_poker_shuffle"
# Update: need unique key names apparently. can't overwrite prior parse output key?
# replicating lines means they'll get reparsed. good! (but give new key names)
csvFilenameList = [
("covtype.data", "c"),
("covtype20x.data", "c20"),
("covtype200x.data", "c200"),
("billion_rows.csv.gz", "b"),
]
# h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
importFolderPath = "standard"
for (csvFilename, hex_key) in csvFilenameList:
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local', hex_key=hex_key,
timeoutSecs=2000)
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvFilename
exec_list(exprList, lenNodes, csvFilename, hex_key)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
redavids/phylogenetics-tools | makeindeliblecontrol.py | 1 | 2727 | import dendropy
import sys
import os
#treefilename must include path if it's not where you're running this from
treefilename = str(sys.argv[1])
H = open(treefilename,'r')
treestring = H.read()
H.close()
header = str('/////////////////////////////////////////////////////////////////////////////////////\n' +
'// //\n'+
'// INDELible V1.03 control file - made by makeindeliblcondtrolfile.py //\n'+
'// //\n'+
'// A basic introduction to the structure of the INDELible control file. //\n'+
'// //\n'+
'/////////////////////////////////////////////////////////////////////////////////////\n'+
'// It is useful to know that anything on a line after two forward slashes is ignored.\n'
'/*\n\t'+
'Another useful thing to know is that anything after a forward slash and star\n'+
'is ignored until INDELible sees a star followed by a forward slash later on.\n'
+'*/ \n')
type = '[TYPE] NUCLEOTIDE 1 \n'
model = ('[MODEL] ruthmodel // Evolutionary models are defined in [MODEL] blocks\n'
         '[submodel] JC // Here the substitution model is simply set as JC69.\n'
         ' [indelmodel] NB 0.4 1 // Geometric indel length distribution (q=0.4, r=1)\n'
         '[insertrate] 0.08 // insertion rate = 0.08 relative to substitution rate of 1\n'
         '[deleterate] 0.12 // deletion rate = 0.12 relative to substitution rate of 1 \n')
tree = '[TREE] ' + ' ' + treefilename + ' ' + treestring + str(' // User trees are defined here\n')
partitions = ('[PARTITIONS] ruthpartitionname // [PARTITIONS] blocks say which models go with\n'
              '\t[' + treefilename + ' ruthmodel 1000] // which trees and define the length of the\n'
              '\t// sequence generated at the root (1000 here).\n')
evolver = ('[EVOLVE] ruthpartitionname 2 outputname // This will generate 2 replicate datasets\n'
           '// from the [PARTITIONS] block named above.\n'
           '// The true alignment will be output in a file named outputname_TRUE.phy\n'
           '// The unaligned sequences will be output in a file named outputname.fas\n'
           '// To learn how to implement more complicated simulations (or different\n'
           '// models) please consult the manual or the other example control files.\n')
controlfile = os.getcwd()+'/control.txt'
G = open(controlfile,'w')
G.write(header)
G.write(type)
G.write(model)
G.write(tree)
G.write(partitions)
G.write(evolver)
G.close()
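# Usage sketch (assumed invocation, not from the original repository):
#   python makeindeliblecontrol.py my_tree.nwk
# reads the Newick tree in my_tree.nwk (any tree file path works) and writes
# an INDELible control.txt into the current working directory.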
| mit |
Juniper/ceilometer | ceilometer/tests/test_notifier.py | 2 | 3359 | #
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/notifier.py
"""
from oslotest import base
from ceilometer import notifier
from ceilometer import pipeline
from ceilometer import transformer
MESSAGE = {
u'event_type': u'compute.instance.create.end',
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'payload': {u'created_at': u'2012-05-08 20:23:41',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'fixed_ips': [{u'address': u'10.0.0.2',
u'floating_ips': [],
u'meta': {},
u'type': u'fixed',
u'version': 4}],
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-08 20:23:47.985999',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
},
u'priority': u'INFO',
u'publisher_id': u'compute.vagrant-precise',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
class TestNotifier(base.BaseTestCase):
def test_process_notification(self):
transformer_manager = transformer.TransformerExtensionManager(
'ceilometer.transformer',
)
notifier._pipeline_manager = pipeline.PipelineManager(
[{
'name': "test_pipeline",
'interval': 60,
'counters': ['*'],
'transformers': [],
'publishers': ["test"],
}],
transformer_manager)
pub = notifier._pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(0, len(pub.samples))
notifier.notify(None, MESSAGE)
self.assertTrue(len(pub.samples) > 0)
self.assertIn('disk.ephemeral.size',
[c.name for c in pub.samples])
| apache-2.0 |
ningchi/scikit-learn | sklearn/tests/test_cross_validation.py | 2 | 40182 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T.shape[0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test split covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
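# The proportion check above relies on np.unique(..., return_inverse=True)
# followed by np.bincount. A minimal, self-contained sketch of that pattern
# (illustrative only, not part of the original test suite; the label values
# are made up):
def _example_class_proportion_counting():
    y = np.array([0, 0, 0, 1])
    # return_inverse maps each label to its class index (0..n_classes-1),
    # and bincount then counts how often each class occurs
    p = np.bincount(np.unique(y, return_inverse=True)[1]) / float(len(y))
    np.testing.assert_array_almost_equal(p, [0.75, 0.25])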
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
    # for each sample
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. coefficient of determination) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval._check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval._check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval._check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with warnings.catch_warnings(record=True):
# deprecated sequence of sequence format
cv = cval._check_cv(3, X, y_seq_of_seqs, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
cv = cval._check_cv(3, X, y_indicator_matrix, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval._check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
bartnijssen/VIC | tests/test_utils.py | 8 | 38567 | # Builtin libs
import os
import re
import glob
import traceback
import warnings
from collections import OrderedDict, namedtuple
import multiprocessing as mp
# Computation libs
import numpy as np
import pandas as pd
import xarray as xr
# Plotting libs
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# Tools from tonic
from tonic.models.vic.vic import (VICRuntimeError,
default_vic_valgrind_error_code)
from tonic.testing import check_completed, check_for_nans, VICTestError
OUTPUT_WIDTH = 100
ERROR_TAIL = 20 # lines
VICOutFile = namedtuple('vic_out_file',
('dirpath', 'prefix', 'lat', 'lon', 'suffix'))
class VICReturnCodeError(Exception):
pass
class VICValgrindError(Exception):
pass
def setup_test_dirs(testname, out_dir, mkdirs=['results', 'state',
'logs', 'plots']):
'''create test directories for testname'''
dirs = OrderedDict()
dirs['test'] = os.path.join(out_dir, testname)
for d in mkdirs:
dirs[d] = os.path.join(dirs['test'], d)
for dirname in dirs.values():
os.makedirs(dirname, exist_ok=True)
return dirs
def print_test_dict(d):
'''print a nicely formatted set of test results'''
print('{0: <48} | {1: <6} | {2}'.format('Test Name', 'Passed', 'Comment'))
print('-'.ljust(OUTPUT_WIDTH, '-'))
for k, v in d.items():
print('{0: <48} | {1: <6} | {2}'.format(clip_string(v.name, 48),
str(bool(v.passed)),
v.comment))
print('-'.ljust(OUTPUT_WIDTH, '-'))
def clip_string(string, length=50):
if len(string) > length:
string = string[:length - 3] + '...'
return string
def print_tail(string, n=20, indent='\t--->'):
'''print tail of multiline string'''
try:
lines = string.decode().splitlines()
except UnicodeDecodeError:
lines = string.splitlines()
for l in lines[-n:]:
print('{0}{1}'.format(indent, l))
def replace_global_values(gp, replace):
'''given a multiline string that represents a VIC global parameter file,
loop through the string, replacing values with those found in the
replace dictionary'''
gpl = []
for line in iter(gp.splitlines()):
line_list = line.split()
if line_list:
key = line_list[0]
if key in replace:
value = replace.pop(key)
val = list([str(value)])
else:
val = line_list[1:]
gpl.append('{0: <20} {1}\n'.format(key, ' '.join(val)))
if replace:
for key, val in replace.items():
try:
value = ' '.join(val)
except:
value = val
gpl.append('{0: <20} {1}\n'.format(key, value))
return gpl
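# A hedged usage sketch for replace_global_values() (illustrative only and not
# part of the original test utilities; the parameter names below are invented):
def _example_replace_global_values():
    gp = 'STARTYEAR 1949\nNODES 3\n'
    gpl = replace_global_values(gp, {'STARTYEAR': 2000})
    # each returned line keeps its key left-justified in a 20-character field;
    # STARTYEAR now carries the replacement value and NODES is unchanged
    assert gpl[0].split() == ['STARTYEAR', '2000']
    assert gpl[1].split() == ['NODES', '3']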
def drop_tests(config, driver):
'''helper function to remove tests that should not be run for driver'''
new = {}
if not isinstance(driver, list): # if single driver
for key, test_cfg in config.items():
try:
if not isinstance(test_cfg['driver'], list):
if test_cfg['driver'].lower() == driver.lower():
new[key] = test_cfg
except KeyError:
raise KeyError('test configuration must specify driver')
else: # if multiple drivers
for key, test_cfg in config.items():
try:
if isinstance(test_cfg['driver'], list):
# check whether the test has the same number of drivers
if len(test_cfg['driver']) == len(driver):
# check whether the test wants to test the same drivers
flag = 1
for d in driver:
if d not in test_cfg['driver']:
flag = 0
if flag == 1:
new[key] = test_cfg
except KeyError:
raise KeyError('test configuration must specify driver')
return new
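# Hedged example of drop_tests() for the single-driver case (illustrative
# only; the test names and driver strings are invented):
def _example_drop_tests():
    config = {'t1': {'driver': 'classic'}, 't2': {'driver': 'image'}}
    kept = drop_tests(config, 'classic')
    # only tests whose 'driver' entry matches the requested driver survive
    assert list(kept.keys()) == ['t1']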
def pop_run_kwargs(config):
'''pop run kwargs for VIC executable'''
run_kwargs = {}
run_kwargs['valgrind'] = config.pop('valgrind', False)
run_kwargs['mpi_proc'] = config.pop('mpi_proc', None)
return run_kwargs
def check_returncode(exe, expected=0):
'''check return code given by VIC, raise error if appropriate'''
if exe.returncode == expected:
return None
elif exe.returncode == default_vic_valgrind_error_code:
raise VICValgrindError(
'Valgrind raised an error when running: \
"{}"'.format(exe.argstring))
else:
raise VICReturnCodeError(
'VIC return code ({0}) did not match expected ({1}) when running '
'"{2}"'.format(exe.returncode, expected, exe.argstring))
def process_error(error, vic_exe):
'''Helper function to process possible error raised during testing'''
tail = None
if isinstance(error, VICRuntimeError):
test_comment = 'Test failed during simulation'
tail = vic_exe.stderr
elif isinstance(error, VICTestError):
test_comment = 'Test failed during testing of output files'
elif isinstance(error, VICValgrindError):
test_comment = 'Test failed due to memory error detected by valgrind'
tail = vic_exe.stderr
elif isinstance(error, VICReturnCodeError):
test_comment = 'Test failed due to incorrect return code'
tail = vic_exe.stderr
elif isinstance(error, AssertionError):
test_comment = 'AssertionError raised during testing'
else:
test_comment = 'Unknown test failure'
traceback.print_stack()
print('\t{0}'.format(test_comment))
print('\t{0}'.format(error))
if tail is not None:
        print('\tLast {0} lines of standard error:'.format(ERROR_TAIL))
print_tail(tail, n=ERROR_TAIL)
return test_comment, error
def test_classic_driver_all_complete(fnames):
'''
Test that all VIC files in fnames have the same first and last index
position
'''
start = None
end = None
for fname in fnames:
df = read_vic_ascii(fname)
# check that each dataframe includes all timestamps
if (start is not None) and (end is not None):
check_completed(df, start, end)
else:
start = df.index[0]
end = df.index[-1]
def test_classic_driver_no_output_file_nans(fnames):
'''Test that all VIC classic driver output files in fnames have no nans'''
for fname in fnames:
print(fname)
df = read_vic_ascii(fname)
check_for_nans(df)
# TODO: Update tonic version of this function,
# need to check that subdaily works
def read_vic_ascii(filepath, parse_dates=True, datetime_index=None, sep='\t',
comment='#', **kwargs):
'''Generic reader function for VIC ASCII output with a standard header
filepath: path to VIC output file
    parse_dates (True or False): Parse dates from the YEAR/MONTH/DAY(/SEC)
        columns and use them as the index
    datetime_index (Pandas.tseries.index.DatetimeIndex): Index to use as
        the datetime index (cannot be combined with parse_dates)
**kwargs: passed to Pandas.read_table
returns Pandas.DataFrame
'''
df = pd.read_table(filepath, sep=sep, comment=comment, **kwargs)
# Strip extra whitespace around variable names
df.rename(columns=lambda x: x.strip(), inplace=True)
if parse_dates and datetime_index:
raise ValueError('cannot specify both parse_dates and datetime_index')
if parse_dates:
# add datetime index
time_cols = ['YEAR', 'MONTH', 'DAY']
df.index = pd.to_datetime(df[time_cols])
if 'SEC' in df:
df.index += pd.Series(
[pd.Timedelta(s, unit='s') for s in df['SEC']], index=df.index)
time_cols.append('SEC')
df.drop(time_cols, inplace=True, axis=1)
if datetime_index is not None:
df.index = datetime_index
return df
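# Hedged sketch of how read_vic_ascii() assembles its datetime index
# (illustrative only; the column values are fabricated and the function is
# exercised on an in-memory buffer instead of a real VIC output file):
def _example_read_vic_ascii():
    import io
    txt = 'YEAR\tMONTH\tDAY\tOUT_PREC\n2000\t1\t1\t0.5\n2000\t1\t2\t0.0\n'
    df = read_vic_ascii(io.StringIO(txt))
    # YEAR/MONTH/DAY are consumed to build a DatetimeIndex, leaving only the
    # flux column(s) in the returned DataFrame
    assert list(df.columns) == ['OUT_PREC']
    assert df.index[0] == pd.Timestamp('2000-01-01')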
def find_global_param_value(gp, param_name):
''' Return the value of a global parameter
Parameters
----------
gp: <str>
Global parameter file, read in by read()
param_name: <str>
The name of the global parameter to find
Returns
----------
line_list[1]: <str>
The value of the global parameter
'''
for line in iter(gp.splitlines()):
line_list = line.split()
if line_list == []:
continue
key = line_list[0]
if key == param_name:
return line_list[1]
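# A small hedged example of find_global_param_value() (illustrative only; the
# global parameter names are invented for the sketch):
def _example_find_global_param_value():
    gp = 'STARTYEAR 1949\nRESULT_DIR ./results\n'
    # the value returned is the second whitespace-separated token of the
    # matching line
    assert find_global_param_value(gp, 'RESULT_DIR') == './results'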
def check_multistream_classic(fnames):
'''
Test the multistream aggregation in the classic driver '''
how_dict = {'OUT_ALBEDO': 'max',
'OUT_SOIL_TEMP_1': 'min',
'OUT_PRESSURE': 'sum',
'OUT_AIR_TEMP': 'first',
'OUT_SWDOWN': 'mean',
'OUT_LWDOWN': 'last'}
streams = {} # Dictionary to store parsed stream names
gridcells = [] # list of gridcells (lat_lon)
for path in fnames:
# split up the path name to get info about the stream
resultdir, fname = os.path.split(path)
pieces = os.path.splitext(fname)[0].split('_')
gridcells.append('_'.join(pieces[-2:]))
stream = '_'.join(pieces[:-2]) # stream name
freq_n = pieces[-3] # stream frequency n
# set the stream frequency for pandas resample
if 'NSTEPS' in stream:
inst_stream = stream
else:
if 'NDAYS' in stream:
streams[stream] = '{}D'.format(freq_n)
elif 'NHOURS' in stream:
streams[stream] = '{}H'.format(freq_n)
elif 'NMINUTES' in stream:
streams[stream] = '{}min'.format(freq_n)
elif 'NSECONDS' in stream:
streams[stream] = '{}S'.format(freq_n)
else:
                raise ValueError(
                    'stream %s not supported in this test' % stream)
# unique gridcells
gridcells = list(set(gridcells))
# Loop over all grid cells in result dir
for gridcell in gridcells:
fname = os.path.join(resultdir,
'{}_{}.txt'.format(inst_stream, gridcell))
instant_df = read_vic_ascii(fname)
# Loop over all streams
for stream, freq in streams.items():
fname = os.path.join(resultdir,
'{}_{}.txt'.format(stream, gridcell))
agg_df = read_vic_ascii(fname)
# Setup the resample of the instantaneous data
rs = instant_df.resample(freq)
# Loop over the variables in the stream
for key, how in how_dict.items():
# Get the aggregated values (from VIC)
actual = agg_df[key].values
# Calculated the expected values based on the resampling from
# pandas
expected = rs[key].aggregate(how).values
# Compare the actual and expected (with tolerance)
np.testing.assert_almost_equal(
actual, expected, decimal=4,
err_msg='Variable=%s, freq=%s, how=%s: '
'failed comparison' % (key, freq, how))
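# The heart of check_multistream_classic() is a pandas resample-and-aggregate
# comparison. A minimal, self-contained sketch of that pattern on fabricated
# hourly data (illustrative only, not part of the original test suite):
def _example_resample_aggregation():
    index = pd.date_range('2000-01-01', periods=48, freq='H')
    instant = pd.DataFrame({'OUT_AIR_TEMP': np.arange(48.)}, index=index)
    # aggregating the instantaneous stream to daily values with how='first'
    # should reproduce what a daily ('1D') output stream would have written
    expected = instant.resample('1D')['OUT_AIR_TEMP'].aggregate('first').values
    np.testing.assert_almost_equal(expected, [0., 24.], decimal=4)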
def setup_subdirs_and_fill_in_global_param_driver_match_test(
dict_s, result_basedir, state_basedir, test_data_dir):
''' Fill in global parameter output directories for multiple driver runs
for driver-match testing
Parameters
----------
dict_s: <dict of string.Template>
A dict of template of the global param file to be filled in
Keys: driver name
result_basedir: <str>
Base directory of output fluxes results; runs with different number of
processors are output to subdirectories under the base directory
state_basedir: <str>
Base directory of output state results; runs with different number of
processors are output to subdirectories under the base directory
test_data_dir: <str>
Base directory of test data
Returns
----------
dict_global_param: <dict>
A dict of global parameter strings to be run with parameters filled in
Require
----------
os
'''
dict_global_param = {}
for driver in dict_s.keys():
# Set up subdirectories for results and states
result_dir = os.path.join(result_basedir, driver)
state_dir = os.path.join(state_basedir, driver)
os.makedirs(result_dir, exist_ok=True)
os.makedirs(state_dir, exist_ok=True)
# Fill in global parameter options
s = dict_s[driver]
dict_global_param[driver] = s.safe_substitute(
test_data_dir=test_data_dir,
result_dir=result_dir,
state_dir=state_dir)
return(dict_global_param)
def parse_classic_driver_outfile_name(fname):
'''helper function to parse VIC classic driver output file name'''
resultdir, filename = os.path.split(fname)
prefix, suffix = os.path.splitext(filename)
pieces = prefix.split('_')
lat, lon = map(float, pieces[-2:])
return VICOutFile(resultdir, prefix, lat, lon, suffix)
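# Hedged sketch of the file-name parsing above (illustrative only; the path
# is made up): a classic-driver flux file name encodes lat/lon as its last
# two underscore-separated pieces.
def _example_parse_classic_driver_outfile_name():
    cell = parse_classic_driver_outfile_name(
        'results/fluxes_48.3125_-120.5938.txt')
    assert (cell.lat, cell.lon) == (48.3125, -120.5938)
    assert cell.prefix == 'fluxes_48.3125_-120.5938'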
def check_drivers_match_fluxes(list_drivers, result_basedir):
''' Check whether the flux results are similar cross multiple drivers
Parameters
----------
list_drivers: <list>
A list of driver names to be compared
e.g., ['classic'; 'image']
NOTE: must have classic driver; classic driver will be the base for
comparison
result_basedir: <str>
Base directory of output fluxes results; results for drivers are
subdirectories under the base directory
Require
----------
glob
xarray
numpy
warnings
collections.namedtuple
parse_classic_driver_outfile_name
VICOutFile
read_vic_ascii
'''
# Identify all classic driver output flux files
try:
list_fnames_classic = glob.glob(
os.path.join(result_basedir, 'classic', '*'))
except:
raise ValueError('incorrect classic driver output for driver-match '
'test')
# Loop over all other drivers and compare with classic driver
for driver in list_drivers:
# skip classic driver
if driver == 'classic':
continue
# if image driver
if driver == 'image':
# load flux file
if len(glob.glob(os.path.join(
result_basedir, driver, '*.nc'))) > 1:
                warnings.warn('More than one netCDF file found under '
                              'directory {}'.format(result_basedir))
fname = glob.glob(os.path.join(result_basedir, driver, '*.nc'))[0]
ds_image = xr.open_dataset(fname)
# loop over each grid cell from classic driver
for fname in list_fnames_classic:
gcell = parse_classic_driver_outfile_name(fname)
df_classic = read_vic_ascii(fname)
ds_image_cell = ds_image.sel(lat=gcell.lat, lon=gcell.lon,
method='nearest')
# compare each variable
for var in ds_image_cell.data_vars:
# if one [time] dimension
if len(ds_image_cell[var].coords) == 3:
# determine precision for comparison
# --- if all zeros for this variable, set
# --- decimal = 2 --- #
if np.sum(np.absolute(ds_image_cell[var].values)) == 0:
decimal = 2
# --- if not all zeros, set decimal depending on the
                        # maximum absolute value of this variable so that the
# comparison has a reasonable precision. Specifically,
# decimal ~= - log10(max_abs_value) + 1 --- #
else:
decimal = int(round(- np.log10(np.max(np.absolute(
ds_image_cell[var].values))) + 1))
# --- keep decimal to be no greater than 4 --- #
if decimal > 4:
decimal = 4
# assert almost equal
np.testing.assert_almost_equal(
ds_image_cell[var].values, df_classic[var].values,
decimal=decimal,
err_msg='Variable {} is different in the classic '
'and image drivers'.format(var))
# if [time, nlayer]
elif len(ds_image_cell[var].coords) == 4:
for l in ds_image['nlayer']:
s_classic = df_classic['{}_{}'.format(var,
l.values)]
s_image = ds_image_cell[var].sel(
nlayer=l).to_series()
# determine precision for comparison
if np.mean(s_image.values) == 0:
decimal = 2
else:
decimal = int(round(- np.log10(np.max(
np.absolute(s_image.values))) + 1))
if decimal > 4:
decimal = 4
                            # assert almost equal
np.testing.assert_almost_equal(
s_image.values,
s_classic.values,
decimal=decimal,
err_msg='Variable {} is different in '
'the classic and image '
'drivers'.format(var))
def tsplit(string, delimiters):
'''Behaves like str.split but supports multiple delimiters. '''
delimiters = tuple(delimiters)
stack = [string]
for delimiter in delimiters:
for i, substring in enumerate(stack):
substack = substring.split(delimiter)
stack.pop(i)
for j, _substring in enumerate(substack):
stack.insert(i + j, _substring)
return stack
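# Hedged example of tsplit() (illustrative only; the header line is modeled on
# the Ameriflux site CSV headers parsed elsewhere in this module):
def _example_tsplit():
    parts = tsplit('Latitude: 45.0 Longitude: -120.5',
                   ('Latitude: ', 'Longitude: '))
    # the string is split on each delimiter in turn, keeping empty leading
    # fields, so the lat/lng values end up at positions 1 and 2
    assert parts == ['', '45.0 ', '-120.5']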
def read_snotel_swe_obs(filename, science_test_data_dir, items):
'''Reads in Snotel SWE obs and returns DataFrame. '''
filename_fullpath = os.path.join(science_test_data_dir,
'inputdata',
items['archive'],
'observations',
filename)
# load snotel obs
snotel_swe = pd.read_csv(filename_fullpath,
skiprows=0,
delim_whitespace=True,
names=['YEAR', 'MONTH', 'DAY', 'OUT_SWE'])
# add datetime index
time_cols = ['YEAR', 'MONTH', 'DAY']
snotel_swe.index = pd.to_datetime(snotel_swe[time_cols])
# remove year, day columns of DataFrame
snotel_swe.drop(time_cols, inplace=True, axis=1)
return snotel_swe
def read_vic_42_output(lat, lng, science_test_data_dir, items):
''' Reads output from VIC 4.2. '''
if items['compare_to'] == 'ecflux':
vic_42_file = 'en_bal_%s_%s' % (lat, lng)
vic_42_dir = os.path.join(science_test_data_dir, 'archive',
items['archive'], 'ecflux', 'results')
elif items['compare_to'] == 'snotel':
vic_42_file = 'outfile_%s_%s' % (lat, lng)
vic_42_dir = os.path.join(science_test_data_dir, 'archive',
items['archive'], 'snotel', 'results')
else:
raise ValueError("this option (%s) has not yet been implemented"
% items['compare_to'])
vic_42 = pd.read_csv(os.path.join(vic_42_dir, vic_42_file),
sep='\t',
skiprows=5)
# remove comment sign from column names in DataFrame
vic_42 = vic_42.rename(columns=lambda x: x.replace('#', ''))
# remove spaces from column names in DataFrame
vic_42 = vic_42.rename(columns=lambda x: x.replace(' ', ''))
# rename radiation variables to be consistent with VIC 5
if items['compare_to'] == 'ecflux':
vic_42 = vic_42.rename(columns=lambda x: x.replace('OUT_NET_SHORT',
'OUT_SWNET'))
vic_42 = vic_42.rename(columns=lambda x: x.replace('OUT_NET_LONG',
'OUT_LWNET'))
# add datetime index
time_cols = ['YEAR', 'MONTH', 'DAY']
vic_42.index = pd.to_datetime(vic_42[time_cols])
if 'HOUR' in vic_42:
vic_42.index += pd.Series(
[pd.Timedelta(s, unit='h') for s in vic_42['HOUR']],
index=vic_42.index)
time_cols.append('HOUR')
# remove year, day columns of DataFrame
vic_42.drop(time_cols, inplace=True, axis=1)
return vic_42
def read_vic_5_output(lat, lng, result_dir, items):
''' Read VIC 5.0.x output. '''
if items['compare_to'] == 'ecflux':
vic_5_file = 'en_bal_%s_%s.txt' % (lat, lng)
vic_5_dir = result_dir
elif items['compare_to'] == 'snotel':
vic_5_file = 'outfile_%s_%s.txt' % (lat, lng)
vic_5_dir = result_dir
else:
raise ValueError("this option (%s) has not yet been implemented"
% items['compare_to'])
vic_5 = pd.read_csv(os.path.join(vic_5_dir, vic_5_file),
skiprows=2,
sep='\t')
# remove spaces from column names
vic_5.rename(columns=lambda x: x.replace(' ', ''), inplace=True)
# add datetime index
time_cols = ['YEAR', 'MONTH', 'DAY']
vic_5.index = pd.to_datetime(vic_5[time_cols])
if 'SEC' in vic_5:
vic_5.index += pd.Series([pd.Timedelta(s, unit='s')
for s in vic_5['SEC']], index=vic_5.index)
time_cols.append('SEC')
# remove year, day columns of DataFrame
vic_5.drop(time_cols, inplace=True, axis=1)
return vic_5
def plot_science_tests(driver, test_type, science_test_data_dir, result_dir,
plot_dir, plots_to_make, compare_data, nproc):
''' makes science test figures
Parameters
----------
driver: <str>
Name of Driver
test_type: <str>
Name of test
science_test_data_dir: <str>
Science test data directory
result_dir: <str>
Result directory
plot_dir: <str>
Directory for output plots
plots_to_make <Dict>
Keys that indicate which plots should be made
compare_data <Dict>
Keys that indicate which datasets and model output to use for
comparison.
nproc <int>
Number of processors to use
Returns
----------
'''
if test_type == "science_test_snotel":
plot_snotel_comparison(driver,
science_test_data_dir,
compare_data,
result_dir,
plot_dir,
plots_to_make,
nproc)
elif test_type == "science_test_fluxnet":
plot_fluxnet_comparison(driver,
science_test_data_dir,
compare_data,
result_dir,
plot_dir,
plots_to_make,
nproc)
else:
raise ValueError("this option %s has not been implemented in the \
VIC 5.0 science test suite" % test_type)
def plot_snotel_comparison(driver, science_test_data_dir,
compare_data_dict,
result_dir, plot_dir,
plots_to_make, nproc):
''' makes snotel figures '''
# plot settings
plot_variables = {'OUT_SWE': 'mm', 'OUT_ALBEDO': 'fraction',
'OUT_SALBEDO': 'fraction', 'OUT_SNOW_DEPTH': 'mm',
'OUT_SNOW_CANOPY': '%', 'OUT_SNOW_PACK_TEMP':
'degrees C', 'OUT_SNOW_MELT': 'mm', 'OUT_R_NET':
'$W/{m^2}$', 'OUT_LATENT': '$W/{m^2}$',
'OUT_SENSIBLE': '$W/{m^2}$'}
context = "paper"
style = "whitegrid"
# --- Set up multiprocessing --- #
pool = mp.Pool(processes=nproc)
for filename in os.listdir(os.path.join(science_test_data_dir,
'inputdata',
'snotel',
'observations')):
pool.apply_async(plot_snotel_comparison_one_site,
(driver, science_test_data_dir,
compare_data_dict,
result_dir, plot_dir,
plots_to_make,
plot_variables, context, style, filename,))
# --- Finish multiprocessing --- #
pool.close()
pool.join()
def plot_snotel_comparison_one_site(
driver, science_test_data_dir,
compare_data_dict,
result_dir, plot_dir,
plots_to_make,
plot_variables, context, style, filename):
print(plots_to_make)
# get lat/lng from filename
file_split = re.split('_', filename)
lng = file_split[3].split('.txt')[0]
lat = file_split[2]
print('Plotting {} {}'.format(lat, lng))
# loop over data to compare
data = {}
for key, items in compare_data_dict.items():
# read in data
if key == "snotel":
data[key] = read_snotel_swe_obs(filename,
science_test_data_dir,
items)
elif key == "VIC.4.2.d":
data[key] = read_vic_42_output(lat, lng,
science_test_data_dir,
items)
else:
data[key] = read_vic_5_output(lat, lng,
result_dir,
items)
# loop over variables to plot
for plot_variable, units in plot_variables.items():
if 'water_year' in plots_to_make:
with plt.rc_context(dict(sns.axes_style(style),
**sns.plotting_context(context))):
fig, ax = plt.subplots(figsize=(10, 10))
df = pd.DataFrame({key: d[plot_variable] for key, d in
data.items() if plot_variable in d})
for key, series in df.iteritems():
series.plot(
use_index=True,
linewidth=compare_data_dict[key]['linewidth'],
ax=ax,
color=compare_data_dict[key]['color'],
linestyle=compare_data_dict[key]
['linestyle'],
zorder=compare_data_dict[key]['zorder'])
ax.legend(loc='upper left')
ax.set_ylabel("%s [%s]" % (plot_variable, units))
# save figure
os.makedirs(os.path.join(plot_dir, plot_variable),
exist_ok=True)
plotname = '%s_%s.png' % (lat, lng)
savepath = os.path.join(plot_dir, plot_variable, plotname)
plt.savefig(savepath, bbox_inches='tight')
print(savepath)
plt.clf()
plt.close()
def check_site_files(obs_dir, subdir):
return len(os.listdir(os.path.join(obs_dir, subdir))) > 0
def get_fluxnet_lat_lon(obs_dir, subdir):
# get CSV file from site directory to get lat/lng for site
try:
site_csv_file = glob.glob(os.path.join(obs_dir, subdir, 'AMF*.csv'))[0]
except IndexError:
site_csv_file = glob.glob(os.path.join(obs_dir, subdir, 'us*.csv'))[0]
with open(site_csv_file) as f:
second_line = list(f)[1]
# parse line from header to get lat/lng
str_split = tsplit(second_line,
('Latitude: ', 'Longitude: ', 'Elevation (masl): '))
lat = str_split[1].strip()
lng = str_split[2].strip()
return lat, lng
def read_fluxnet_obs(subdir, science_test_data_dir, items):
# column names for DataFrame (same as VIC variable names)
fluxnet_names = ['YEAR', 'MONTH', 'DAY', 'HOUR', 'PREC', 'AIR_TEMP',
'SWDOWN', 'LWDOWN', 'OUT_REL_HUMID', 'PRESSURE', 'WIND',
'OUT_EVAP', 'SOIL_TEMP_DEPTH1', 'SOIL_TEMP_DEPTH2',
'SOIL_TEMP_DEPTH3', 'SOIL_TEMP_DEPTH4',
'SOIL_TEMP_DEPTH5', 'OUT_SOIL_MOIST1', 'OUT_SOIL_MOIST2',
'OUT_SOIL_MOIST3', 'OUT_SOIL_MOIST4', 'OUT_SOIL_MOIST5',
'OUT_SOIL_TEMP1', 'OUT_SOIL_TEMP2', 'OUT_SOIL_TEMP3',
'OUT_SOIL_TEMP4', 'OUT_SOIL_TEMP5', 'OUT_SWNET',
'OUT_LWNET', 'OUT_SENSIBLE', 'OUT_LATENT',
'OUT_GRND_FLUX']
filename = '%s.stdfmt.hourly.local.txt' % subdir
# read in data with -9999.0000 as NaNs
obs_dir = os.path.join(science_test_data_dir, 'inputdata',
'ec_flux_towers', 'obs')
ecflux_df = pd.read_csv(os.path.join(obs_dir, subdir, filename),
skiprows=0,
delim_whitespace=True,
header=None,
names=fluxnet_names,
na_values=-9999.0000)
# add datetime index
time_cols = ['YEAR', 'MONTH', 'DAY']
ecflux_df.index = pd.to_datetime(ecflux_df[time_cols])
if 'HOUR' in ecflux_df:
ecflux_df.index += pd.Series(
[pd.Timedelta(s, unit='h') for s in ecflux_df['HOUR']],
index=ecflux_df.index)
time_cols.append('HOUR')
# remove year, day columns of DataFrame
ecflux_df.drop(time_cols, inplace=True, axis=1)
return ecflux_df
def plot_fluxnet_comparison(driver, science_test_data_dir,
compare_data_dict,
result_dir, plot_dir,
plots_to_make, nproc):
''' makes Ameriflux figures
'''
context = "paper"
style = "whitegrid"
var_names = {'OUT_LATENT': 'LH', 'OUT_SENSIBLE': 'H', 'OUT_SWNET':
'SW_NET', 'OUT_LWNET': 'LW NET'}
months = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September',
'October', 'November', 'December']
# loop over Ameriflux sites
obs_dir = os.path.join(science_test_data_dir,
'inputdata',
'ec_flux_towers',
'obs')
# --- Set up multiprocessing --- #
pool = mp.Pool(processes=nproc)
for subdir in os.listdir(obs_dir):
pool.apply_async(plot_fluxnet_comparison_one_site,
(driver, science_test_data_dir,
compare_data_dict,
result_dir, plot_dir,
plots_to_make,
context, style, var_names, months, obs_dir, subdir,))
# --- Finish multiprocessing --- #
pool.close()
pool.join()
def plot_fluxnet_comparison_one_site(driver, science_test_data_dir,
compare_data_dict, result_dir, plot_dir,
plots_to_make, context, style, var_names,
months, obs_dir, subdir):
if check_site_files(obs_dir, subdir):
# get CSV file from site directory to get lat/lng for site
lat, lng = get_fluxnet_lat_lon(obs_dir, subdir)
print(lat, lng)
# loop over data to compare
data = {}
for key, items in compare_data_dict.items():
if key == "ecflux":
try:
# load Ameriflux data
data[key] = read_fluxnet_obs(subdir,
science_test_data_dir,
items)
except OSError:
warnings.warn(
"this %s site does not have data" % subdir)
elif key == "VIC.4.2.d":
try:
# load VIC 4.2 simulations
data[key] = read_vic_42_output(lat, lng,
science_test_data_dir,
items)
except OSError:
warnings.warn(
"this site has a lat/lng precision issue")
else:
try:
# load VIC 5 simulations
data[key] = read_vic_5_output(lat, lng,
result_dir,
items)
except OSError:
warnings.warn(
"this site has a lat/lng precision issue")
# make figures
# plot preferences
fs = 15
dpi = 150
if 'annual_mean_diurnal_cycle' in plots_to_make:
# make annual mean diurnal cycle plots
with plt.rc_context(dict(sns.axes_style(style),
**sns.plotting_context(context))):
f, axarr = plt.subplots(4, 1, figsize=(8, 8), sharex=True)
for i, (vic_var, variable_name) in enumerate(
var_names.items()):
# calculate annual mean diurnal cycle for each
# DataFrame
annual_mean = {}
for key, df in data.items():
annual_mean[key] = pd.DataFrame(
df[vic_var].groupby(df.index.hour).mean())
df = pd.DataFrame(
{key: d[vic_var] for key, d in annual_mean.items()
if vic_var in d})
for key, series in df.iteritems():
series.plot(
linewidth=compare_data_dict[key]['linewidth'],
ax=axarr[i],
color=compare_data_dict[key]['color'],
linestyle=compare_data_dict[key]['linestyle'],
zorder=compare_data_dict[key]['zorder'])
axarr[i].legend(loc='upper left')
axarr[i].set_ylabel(
'%s ($W/{m^2}$)' % variable_name,
size=fs)
axarr[i].set_xlabel('Time of Day (Hour)', size=fs)
axarr[i].set_xlim([0, 24])
axarr[i].xaxis.set_ticks(np.arange(0, 24, 3))
# save plot
plotname = '%s_%s.png' % (lat, lng)
os.makedirs(os.path.join(plot_dir, 'annual_mean'),
exist_ok=True)
savepath = os.path.join(plot_dir, 'annual_mean', plotname)
plt.savefig(savepath, bbox_inches='tight', dpi=dpi)
plt.clf()
plt.close()
if 'monthly_mean_diurnal_cycle' in plots_to_make:
# make monthly mean diurnal cycle plots
with plt.rc_context(dict(sns.axes_style(style),
**sns.plotting_context(context))):
f, axarr = plt.subplots(4, 12, figsize=(35, 7),
sharex=True,
sharey=True)
for i, (vic_var, variable_name) in enumerate(
var_names.items()):
# calculate monthly mean diurnal cycle
monthly_mean = {}
for (key, df) in data.items():
monthly_mean[key] = pd.DataFrame(
df[vic_var].groupby([df.index.month,
df.index.hour]).mean())
df = pd.DataFrame(
{key: d[vic_var] for key, d in monthly_mean.items()
if vic_var in d})
for j, month in enumerate(months):
for key, series in df.iteritems():
series[j + 1].plot(
linewidth=compare_data_dict[key]['linewidth'],
ax=axarr[i, j],
color=compare_data_dict[key]['color'],
linestyle=compare_data_dict[key]['linestyle'],
zorder=compare_data_dict[key]['zorder'])
axarr[i, j].set_ylabel(
'%s \n ($W/{m^2}$)' % variable_name,
size=fs)
axarr[i, j].set_xlabel('', size=fs)
axarr[i, j].set_xlim([0, 24])
axarr[i, j].xaxis.set_ticks(np.arange(0, 24, 3))
if i == 0:
axarr[i, j].set_title(month, size=fs)
# add legend
axarr[0, -1].legend(loc='center left',
bbox_to_anchor=(1, 0.5))
# add common x label
f.text(0.5, 0.04, 'Time of Day (Hour)', ha='center',
size=fs)
# save plot
plotname = '%s_%s.png' % (lat, lng)
os.makedirs(os.path.join(plot_dir, 'monthly_mean'),
exist_ok=True)
savepath = os.path.join(plot_dir,
'monthly_mean', plotname)
plt.savefig(savepath, bbox_inches='tight', dpi=dpi)
plt.clf()
plt.close()
| gpl-2.0 |
woobe/h2o | py/testdir_single_jvm/test_GLM2_covtype_train.py | 1 | 4705 | import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i, h2o_exec, h2o_glm, h2o_gbm, h2o_exec as h2e
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1, java_heap_GB=10, base_port=54333)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1, java_heap_GB=10)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_covtype_train(self):
h2o.beta_features = True
importFolderPath = "standard"
csvFilename = 'covtype.shuffled.data'
csvPathname = importFolderPath + "/" + csvFilename
hex_key = csvFilename + ".hex"
# Parse and Exec************************************************
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key, timeoutSecs=180)
execExpr="A.hex=%s" % parseResult['destination_key']
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
# use exec to change the output col to binary, case_mode/case_val doesn't work if we use predict
# will have to live with random extract. will create variance
# class 4 = 1, everything else 0
y = 54
execExpr="A.hex[,%s]=(A.hex[,%s]==%s)" % (y+1, y+1, 4)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
inspect = h2o_cmd.runInspect(key="A.hex")
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
# Split Test/Train************************************************
# how many rows for each pct?
numRows = inspect['numRows']
pct10 = int(numRows * .1)
rowsForPct = [i * pct10 for i in range(0,11)]
# this can be slightly less than 10%
last10 = numRows - rowsForPct[9]
rowsForPct[10] = last10
# use mod below for picking "rows-to-do" in case we do more than 9 trials
# use 10 if 0 just to see (we copied 10 to 0 above)
rowsForPct[0] = rowsForPct[10]
print "Creating the key of the last 10% data, for scoring"
trainDataKey = "rTrain"
testDataKey = "rTest"
# start at 90% rows + 1
# GLM, predict, CM*******************************************************8
kwargs = {
'response': 'C' + str(y+1),
'max_iter': 20,
'n_folds': 0,
'alpha': 0.1,
'lambda': 1e-5,
'family': 'binomial',
}
timeoutSecs = 180
for trial in range(10):
# always slice from the beginning
rowsToUse = rowsForPct[trial%10]
# test/train split **********************************************8
h2o_cmd.createTestTrain(srcKey='A.hex', trainDstKey=trainDataKey, testDstKey=testDataKey, trainPercent=90)
aHack = {'destination_key': trainDataKey}
parseKey = trainDataKey
# GLM **********************************************8
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
print "glm end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
modelKey = glm['glm_model']['_key']
# Score **********************************************
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key=testDataKey,
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual=testDataKey,
vactual='C' + str(y+1),
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
pctWrong = h2o_gbm.pp_cm_summary(cm);
            self.assertLess(pctWrong, 8, "Should see less than 8% error (class = 4)")
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
print "Trial #", trial, "completed", "using %6.2f" % (rowsToUse*100.0/numRows), "pct. of all rows"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
glewis17/fuel | fuel/converters/mnist.py | 18 | 6073 | import gzip
import os
import struct
import h5py
import numpy
from fuel.converters.base import fill_hdf5_file, check_exists
MNIST_IMAGE_MAGIC = 2051
MNIST_LABEL_MAGIC = 2049
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
ALL_FILES = [TRAIN_IMAGES, TRAIN_LABELS, TEST_IMAGES, TEST_LABELS]
@check_exists(required_files=ALL_FILES)
def convert_mnist(directory, output_directory, output_filename=None,
dtype=None):
"""Converts the MNIST dataset to HDF5.
Converts the MNIST dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.MNIST`. The converted dataset is
saved as 'mnist.hdf5'.
It assumes the existence of the following files:
* `train-images-idx3-ubyte.gz`
* `train-labels-idx1-ubyte.gz`
* `t10k-images-idx3-ubyte.gz`
* `t10k-labels-idx1-ubyte.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to `None`, in which case a name
based on `dtype` will be used.
dtype : str, optional
Either 'float32', 'float64', or 'bool'. Defaults to `None`,
in which case images will be returned in their original
unsigned byte format.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
"""
if not output_filename:
if dtype:
output_filename = 'mnist_{}.hdf5'.format(dtype)
else:
output_filename = 'mnist.hdf5'
output_path = os.path.join(output_directory, output_filename)
h5file = h5py.File(output_path, mode='w')
train_feat_path = os.path.join(directory, TRAIN_IMAGES)
train_features = read_mnist_images(train_feat_path, dtype)
train_lab_path = os.path.join(directory, TRAIN_LABELS)
train_labels = read_mnist_labels(train_lab_path)
test_feat_path = os.path.join(directory, TEST_IMAGES)
test_features = read_mnist_images(test_feat_path, dtype)
test_lab_path = os.path.join(directory, TEST_LABELS)
test_labels = read_mnist_labels(test_lab_path)
data = (('train', 'features', train_features),
('train', 'targets', train_labels),
('test', 'features', test_features),
('test', 'targets', test_labels))
fill_hdf5_file(h5file, data)
h5file['features'].dims[0].label = 'batch'
h5file['features'].dims[1].label = 'channel'
h5file['features'].dims[2].label = 'height'
h5file['features'].dims[3].label = 'width'
h5file['targets'].dims[0].label = 'batch'
h5file['targets'].dims[1].label = 'index'
h5file.flush()
h5file.close()
return (output_path,)
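# Example usage (an illustrative sketch; the directory paths are hypothetical and
# assume the four gzipped ubyte files listed above have already been downloaded):
#
#     output_paths = convert_mnist('/data/mnist', '/data', dtype='float32')
#     # -> ('/data/mnist_float32.hdf5',)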
def fill_subparser(subparser):
"""Sets up a subparser to convert the MNIST dataset files.
Parameters
----------
subparser : :class:`argparse.ArgumentParser`
Subparser handling the `mnist` command.
"""
subparser.add_argument(
"--dtype", help="dtype to save to; by default, images will be " +
"returned in their original unsigned byte format",
choices=('float32', 'float64', 'bool'), type=str, default=None)
return convert_mnist
def read_mnist_images(filename, dtype=None):
"""Read MNIST images from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read images.
dtype : 'float32', 'float64', or 'bool'
If unspecified, images will be returned in their original
unsigned byte format.
Returns
-------
images : :class:`~numpy.ndarray`, shape (n_images, 1, n_rows, n_cols)
An image array, with individual examples indexed along the
first axis and the image dimensions along the second and
third axis.
Notes
-----
If the dtype provided was Boolean, the resulting array will
be Boolean with `True` if the corresponding pixel had a value
greater than or equal to 128, `False` otherwise.
If the dtype provided was a float dtype, the values will be mapped to
the unit interval [0, 1], with pixel values that were 255 in the
original unsigned byte representation equal to 1.0.
"""
with gzip.open(filename, 'rb') as f:
magic, number, rows, cols = struct.unpack('>iiii', f.read(16))
if magic != MNIST_IMAGE_MAGIC:
raise ValueError("Wrong magic number reading MNIST image file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape((number, 1, rows, cols))
if dtype:
dtype = numpy.dtype(dtype)
if dtype.kind == 'b':
# If the user wants Booleans, threshold at half the range.
array = array >= 128
elif dtype.kind == 'f':
# Otherwise, just convert.
array = array.astype(dtype)
array /= 255.
else:
raise ValueError("Unknown dtype to convert MNIST to")
return array
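# Illustrative sketch (the filename is hypothetical): reading with dtype='bool'
# thresholds pixels at 128, as described in the Notes above.
#
#     images = read_mnist_images('train-images-idx3-ubyte.gz', dtype='bool')
#     # images.shape == (n_images, 1, n_rows, n_cols); pixels >= 128 become True.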
def read_mnist_labels(filename):
"""Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with gzip.open(filename, 'rb') as f:
magic, _ = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError("Wrong magic number reading MNIST label file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape(array.size, 1)
return array
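# Illustrative sketch (the filename is hypothetical):
#
#     labels = read_mnist_labels('train-labels-idx1-ubyte.gz')
#     # labels.shape == (n_labels, 1), dtype uint8.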
| mit |
Unode/ete | ete3/test/test_tree.py | 1 | 71317 | from __future__ import absolute_import
from __future__ import print_function
import unittest
import random
import itertools
import sys
from six.moves import range
from .. import Tree, PhyloTree, TreeNode
from ..coretype.tree import TreeError
from ..parser.newick import NewickError
from .datasets import *
class Test_Coretype_Tree(unittest.TestCase):
""" Tests tree basics. """
def test_read_write_exceptions(self):
def wrong_dist():
t = Tree()
t.dist = '1a'
def wrong_support():
t = Tree()
t.support = '1a'
def wrong_up():
t = Tree()
t.up = 'Something'
def wrong_children():
t = Tree()
t.children = 'Something'
self.assertRaises(TreeError, wrong_dist)
self.assertRaises(TreeError, wrong_support)
self.assertRaises(TreeError, wrong_up)
self.assertRaises(TreeError, wrong_children)
def test_add_remove_features(self):
#The features concept will probably change in future versions. It is
#very inefficient in large trees.
t = Tree()
t.add_features(testf1=1, testf2="1", testf3=[1])
t.add_feature('testf4', set([1]))
self.assertEqual(t.testf1, 1)
self.assertEqual(t.testf2, "1")
self.assertEqual(t.testf3, [1])
self.assertEqual(t.testf4, set([1]))
t.del_feature('testf4')
self.assertTrue('testf4' not in t.features)
def test_tree_read_and_write(self):
""" Tests newick support """
# Read and write newick tree from file (and support for NHX
# format): newick parser
open("/tmp/etetemptree.nw","w").write(nw_full)
t = Tree("/tmp/etetemptree.nw")
t.write(outfile='/tmp/etewritetest.nw')
self.assertEqual(nw_full, t.write(features=["flag","mood"]))
self.assertEqual(nw_topo, t.write(format=9))
self.assertEqual(nw_dist, t.write(format=5))
# Read and write newick tree from *string* (and support for NHX
# format)
t = Tree(nw_full)
self.assertEqual(nw_full, t.write(features=["flag","mood"]))
self.assertEqual(nw_topo, t.write(format=9))
self.assertEqual( nw_dist, t.write(format=5))
# Read complex newick
t = Tree(nw2_full)
self.assertEqual(nw2_full, t.write())
# Read weird topologies
t = Tree(nw_simple5)
self.assertEqual(nw_simple5, t.write(format=9))
t = Tree(nw_simple6)
self.assertEqual(nw_simple6, t.write(format=9))
#Read single node trees:
self.assertEqual(Tree("hola;").write(format=9), "hola;")
self.assertEqual(Tree("(hola);").write(format=9), "(hola);")
#Test export root features
t = Tree("(((A[&&NHX:name=A],B[&&NHX:name=B])[&&NHX:name=NoName],C[&&NHX:name=C])[&&NHX:name=I],(D[&&NHX:name=D],F[&&NHX:name=F])[&&NHX:name=J])[&&NHX:name=root];")
#print t.get_ascii()
self.assertEqual(t.write(format=9, features=["name"], format_root_node=True),
"(((A[&&NHX:name=A],B[&&NHX:name=B])[&&NHX:name=NoName],C[&&NHX:name=C])[&&NHX:name=I],(D[&&NHX:name=D],F[&&NHX:name=F])[&&NHX:name=J])[&&NHX:name=root];")
#Test exporting ordered features
t = Tree("((A,B),C);")
expected_nw = "((A:1[&&NHX:dist=1.0:name=A:support=1.0],B:1[&&NHX:0=0:1=1:2=2:3=3:4=4:5=5:6=6:7=7:8=8:9=9:a=a:b=b:c=c:d=d:dist=1.0:e=e:f=f:g=g:h=h:i=i:j=j:k=k:l=l:m=m:n=n:name=B:o=o:p=p:q=q:r=r:s=s:support=1.0:t=t:u=u:v=v:w=w])1:1[&&NHX:dist=1.0:name=:support=1.0],C:1[&&NHX:dist=1.0:name=C:support=1.0]);"
features = list("abcdefghijklmnopqrstuvw0123456789")
random.shuffle(features)
for letter in features:
(t & "B").add_feature(letter, letter)
self.assertEqual(expected_nw, t.write(features=[]))
# Node instance repr
self.assertTrue(Tree().__repr__().startswith('Tree node'))
def test_concat_trees(self):
t1 = Tree('((A, B), C);')
t2 = Tree('((a, b), c);')
concat_tree = t1 + t2
concat_tree.sort_descendants()
self.assertEqual(concat_tree.write(format=9), '(((A,B),C),((a,b),c));')
t3 = PhyloTree('((a, b), c);')
mixed_types = lambda: t1 + t3
self.assertRaises(TreeError, mixed_types)
def test_newick_formats(self):
""" tests different newick subformats """
from ..parser.newick import print_supported_formats, NW_FORMAT
print_supported_formats()
# Let's stress a bit
for i in range(10):
t = Tree()
t.populate(4, random_branches=True)
for f in NW_FORMAT:
self.assertEqual(t.write(format=f), Tree(t.write(format=f),format=f).write(format=f))
# Format 0 = ((H:1,(G:1,F:1)1:1)1:1,I:1)1:1;
# Format 1 = ((H:1,(G:1,F:1):1):1,I:1):1;
# Format 2 = ((H:1,(G:1,F:1)1:1)1:1,I:1)1:1;
# Format 3 = ((H:1,(G:1,F:1)NoName:1)NoName:1,I:1)NoName:1;
# Format 4 = ((H:1,(G:1,F:1)),I:1);
# Format 5 = ((H:1,(G:1,F:1):1):1,I:1):1;
# Format 6 = ((H,(G,F):1):1,I):1;
# Format 7 = ((H:1,(G:1,F:1)NoName)NoName,I:1)NoName;
# Format 8 = ((H,(G,F)NoName)NoName,I)NoName;
# Format 9 = ((H,(G,F)),I);
# Format 100 = ((,(,)),);
t = Tree()
t.populate(50, random_branches=True)
t.sort_descendants()
expected_distances = [round(x, 6) for x in [n.dist for n in t.traverse('postorder')]]
expected_leaf_distances = [round(x, 6) for x in [n.dist for n in t]]
expected_internal_distances = [round(x, 6) for x in [n.dist for n in t.traverse('postorder') if not n.is_leaf()]]
expected_supports = [round(x, 6) for x in [n.support for n in t.traverse('postorder') if not n.is_leaf()]]
expected_leaf_names = [n.name for n in t]
# Check that all formats read names correctly
for f in [0,1,2,3,5,6,7,8,9]:
t2 = Tree(t.write(format=f, dist_formatter="%0.6f", support_formatter="%0.6f", format_root_node=True), format=f)
t2.sort_descendants()
observed_names = [n.name for n in t]
self.assertEqual(observed_names, expected_leaf_names)
# Check that all formats that read distances recover the original distances
for f in [0,1,2,3,5]:
t2 = Tree(t.write(format=f, dist_formatter="%0.6f", support_formatter="%0.6f", format_root_node=True), format=f)
t2.sort_descendants()
observed_distances = [round(x, 6) for x in [n.dist for n in t2.traverse('postorder')]]
self.assertEqual(observed_distances, expected_distances)
# formats reading only leaf distances
for f in [4,7]:
t2 = Tree(t.write(format=f, dist_formatter="%0.6f", support_formatter="%0.6f", format_root_node=True), format=f)
t2.sort_descendants()
observed_distances = [round(x, 6) for x in [n.dist for n in t2]]
self.assertEqual(observed_distances, expected_leaf_distances)
# formats reading only internal node distances
for f in [6]:
t2 = Tree(t.write(format=f, dist_formatter="%0.6f", support_formatter="%0.6f", format_root_node=True), format=f)
t2.sort_descendants()
observed_distances = [round(x, 6) for x in [n.dist for n in t2.traverse('postorder') if not n.is_leaf()]]
self.assertEqual(observed_distances, expected_internal_distances)
# Check that all formats that read supports recover the original supports
#print t.get_ascii(attributes=["support"])
for f in [0,2]:
t2 = Tree(t.write(format=f, dist_formatter="%0.6f", support_formatter="%0.6f", format_root_node=True), format=f)
t2.sort_descendants()
observed_supports = [round(x, 6) for x in [n.support for n in t2.traverse('postorder') if not n.is_leaf()]]
self.assertEqual(observed_supports, expected_supports)
# Check that formats reading supports, do not accept node names
for f in [0,2]:
# format 3 forces dumping internal node names, using NoName when missing
self.assertRaises(Exception, Tree, t.write(format=3), format=f)
# Check that formats reading names, do not load supports
for f in [1, 3]:
# format 3 forces dumping internal node names, using NoName when missing
t2 = Tree(t.write(format=0), format=f)
default_supports = set([n.support for n in t2.traverse()])
self.assertEqual(set([1.0]), default_supports)
# Check errors reading numbers
error_nw1 = "((A:0.813705,(E:0.545591,D:0.411772)error:0.137245)1.000000:0.976306,C:0.074268);"
for f in [0, 2]:
self.assertRaises(NewickError, Tree, error_nw1, format=f)
error_nw2 = "((A:0.813705,(E:0.545error,D:0.411772)1.0:0.137245)1.000000:0.976306,C:0.074268);"
for f in [0, 1, 2]:
self.assertRaises(NewickError, Tree, error_nw2, format=f)
error_nw3 = "((A:0.813705,(E:0.545error,D:0.411772)1.0:0.137245)1.000000:0.976306,C:0.074268);"
for f in [0, 1, 2]:
self.assertRaises(NewickError, Tree, error_nw2, format=f)
# Check errors derived from reading names with weird or illegal chars
base_nw = "((NAME1:0.813705,(NAME2:0.545,NAME3:0.411772)NAME6:0.137245)NAME5:0.976306,NAME4:0.074268);"
valid_names = ['[name]', '[name', '"name"', "'name'", "'name", 'name', '[]\'"&%$!*.']
error_names = ['error)', '(error', "erro()r", ":error", "error:", "err:or", ",error", "error,"]
for ename in error_names:
#print ename, base_nw.replace('NAME2', ename)
self.assertRaises(NewickError, Tree, base_nw.replace('NAME2', ename), format=1)
if not ename.startswith(','):
#print ename, base_nw.replace('NAME6', ename)
self.assertRaises(NewickError, Tree, base_nw.replace('NAME6', ename), format=1)
for vname in valid_names:
expected_names = set(['NAME1', vname, 'NAME3', 'NAME4'])
#print set([n.name for n in Tree(base_nw.replace('NAME2', vname), format=1)])
self.assertEqual(set([n.name for n in Tree(base_nw.replace('NAME2', vname), format=1)]),
expected_names)
# invalid NHX format
self.assertRaises(NewickError, Tree, "(((A, B), C)[&&NHX:nameI]);")
# unsupported newick stream
self.assertRaises(NewickError, Tree, [1,2,3])
def test_quoted_names(self):
complex_name = "((A:0.0001[&&NHX:hello=true],B:0.011)90:0.01[&&NHX:hello=true],(C:0.01, D:0.001)hello:0.01);"
# A quoted tree within a tree
nw1 = '(("A:0.1":1,"%s":2)"C:0.00":3,"D":4);' %complex_name
#escaped quotes
nw2 = '''(("A:\\"0.1\\"":1,"%s":2)"C:'0.00'":3,"D'sd''\'":4);''' %complex_name
for nw in [nw1, nw2]:
self.assertRaises(NewickError, Tree, newick=nw)
self.assertRaises(NewickError, Tree, newick=nw, quoted_node_names=True, format=0)
t = Tree(newick=nw, format=1, quoted_node_names=True)
self.assertTrue(any(n for n in t if n.name == '%s'%complex_name))
# test writing and reloading tree
nw_back = t.write(quoted_node_names=True, format=1)
t2 = Tree(newick=nw, format=1, quoted_node_names=True)
nw_back2 = t2.write(quoted_node_names=True, format=1)
self.assertEqual(nw, nw_back)
self.assertEqual(nw, nw_back2)
def test_custom_formatting_formats(self):
""" test to change dist, name and support formatters """
t = Tree('((A:1.111111, B:2.222222)C:3.33333, D:4.44444);', format=1)
t.sort_descendants()
check = [[0, '((TEST-A:1.1,TEST-B:2.2)SUP-1.0:3.3,TEST-D:4.4);'],
[1, '((TEST-A:1.1,TEST-B:2.2)TEST-C:3.3,TEST-D:4.4);'],
[2, '((TEST-A:1.1,TEST-B:2.2)SUP-1.0:3.3,TEST-D:4.4);'],
[3, '((TEST-A:1.1,TEST-B:2.2)TEST-C:3.3,TEST-D:4.4);'],
[4, '((TEST-A:1.1,TEST-B:2.2),TEST-D:4.4);'],
[5, '((TEST-A:1.1,TEST-B:2.2):3.3,TEST-D:4.4);'],
[6, '((TEST-A,TEST-B):3.3,TEST-D);'],
[7, '((TEST-A:1.1,TEST-B:2.2)TEST-C,TEST-D:4.4);'],
[8, '((TEST-A,TEST-B)TEST-C,TEST-D);'],
[9, '((TEST-A,TEST-B),TEST-D);']]
for f, result in check:
nw = t.write(format=f, dist_formatter="%0.1f", name_formatter="TEST-%s", support_formatter="SUP-%0.1f")
self.assertEqual(nw, result)
def test_tree_manipulation(self):
""" tests operations which modify tree topology """
nw_tree = "((Hola:1,Turtle:1.3)1:1,(A:0.3,B:2.4)1:0.43);"
# Manipulate Topologies
# Adding and removing nodes (add_child, remove_child,
# add_sister, remove_sister). The resulting newick tree should
# match the nw_tree defined before.
t = Tree()
remove_child_except = lambda: t.remove_child(t)
add_sister_except = lambda: t.add_sister()
self.assertRaises(TreeError, remove_child_except)
self.assertRaises(TreeError, add_sister_except)
c1 = t.add_child(dist=1, support=1)
c2 = t.add_child(dist=0.43, support=1)
n = TreeNode(name="Hola", dist=1, support=1)
_n = c1.add_child(n)
c3 = _n.add_sister(name="Turtle", dist="1.3")
c4 = c2.add_child(name="A", dist="0.3")
c5 = c2.add_child(name="todelete")
_c5 = c2.remove_child(c5)
c6 = c2.add_child(name="todelete")
_c6 = c4.remove_sister()
c7 = c2.add_child(name="B", dist=2.4)
self.assertEqual(nw_tree, t.write())
self.assertEqual(_c5, c5)
self.assertEqual(_c6, c6)
self.assertEqual(_n, n)
# Delete,
t = Tree("(((A, B), C)[&&NHX:name=I], (D, F)[&&NHX:name=J])[&&NHX:name=root];")
D = t.search_nodes(name="D")[0]
F = t.search_nodes(name="F")[0]
J = t.search_nodes(name="J")[0]
root = t.search_nodes(name="root")[0]
J.delete()
self.assertEqual(J.up, None)
self.assertEqual(J in t, False)
self.assertEqual(D.up, root)
self.assertEqual(F.up, root)
# Delete while preventing non-dichotomic nodes
t = Tree('((((A:1,B:1):1,C:1):1,D:1):1,E:1);')
orig_dist = t.get_distance('A')
C = t&('C')
C.delete(preserve_branch_length=True)
self.assertEqual(orig_dist, t.get_distance('A'))
t = Tree('((((A:1,B:1):1,C:1):1,D:1):1,E:1);')
orig_dist = t.get_distance('A')
C = t&('C')
C.delete(preserve_branch_length=False)
self.assertEqual(orig_dist, t.get_distance('A')+1)
t = Tree('((((A:1,B:1):1,C:1):1,D:1):1,E:1);')
orig_dist = t.get_distance('A')
C = t&('C')
C.delete(prevent_nondicotomic=False)
self.assertEqual(orig_dist, t.get_distance('A'))
#detach
t = Tree("(((A, B)[&&NHX:name=H], C)[&&NHX:name=I], (D, F)[&&NHX:name=J])[&&NHX:name=root];")
D = t.search_nodes(name="D")[0]
F = t.search_nodes(name="F")[0]
J = t.search_nodes(name="J")[0]
root = t.search_nodes(name="root")[0]
J.detach()
self.assertEqual(J.up, None)
self.assertEqual(J in t, False)
self.assertEqual(set([n.name for n in t.iter_descendants()]),set(["A","B","C","I","H"]))
# sorting branches
t1 = Tree('((A,B),(C,D,E,F), (G,H,I));')
t1.ladderize()
self.assertEqual(t1.get_leaf_names(), [_ for _ in 'ABGHICDEF'])
t1.ladderize(direction=1)
self.assertEqual(t1.get_leaf_names(), [_ for _ in 'FEDCIHGBA'])
t1.sort_descendants()
self.assertEqual(t1.get_leaf_names(), [_ for _ in 'ABCDEFGHI'])
# prune
t1 = Tree("(((A, B), C)[&&NHX:name=I], (D, F)[&&NHX:name=J])[&&NHX:name=root];")
D1 = t1.search_nodes(name="D")[0]
t1.prune(["A","C", D1])
sys.stdout.flush()
self.assertEqual(set([n.name for n in t1.iter_descendants()]), set(["A","C","D","I"]))
t1 = Tree("(((A, B), C)[&&NHX:name=I], (D, F)[&&NHX:name=J])[&&NHX:name=root];")
D1 = t1.search_nodes(name="D")[0]
t1.prune(["A","B"])
self.assertEqual( t1.write(), "(A:1,B:1);")
# test prune keeping internal nodes
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
#print t1.get_ascii()
t1.prune(['A', 'B', 'F', 'H'])
#print t1.get_ascii()
self.assertEqual(set([n.name for n in t1.traverse()]),
set(['A', 'B', 'F', 'H', 'root']))
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
#print t1.get_ascii()
t1.prune(['A', 'B'])
#print t1.get_ascii()
self.assertEqual(set([n.name for n in t1.traverse()]),
set(['A', 'B', 'root']))
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
#print t1.get_ascii()
t1.prune(['A', 'B', 'C'])
#print t1.get_ascii()
self.assertEqual(set([n.name for n in t1.traverse()]),
set(['A', 'B', 'C', 'root']))
t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
#print t1.get_ascii()
t1.prune(['A', 'B', 'I'])
#print t1.get_ascii()
self.assertEqual(set([n.name for n in t1.traverse()]),
set(['A', 'B', 'C', 'I', 'root']))
def test_pruning(self):
# test prune preserving distances
for i in range(10):
t = Tree()
t.populate(40, random_branches=True)
orig_nw = t.write()
distances = {}
for a in t.iter_leaves():
for b in t.iter_leaves():
distances[(a,b)] = round(a.get_distance(b), 6)
to_keep = set(random.sample(t.get_leaves(), 6))
t.prune(to_keep, preserve_branch_length=True)
for a,b in distances:
if a in to_keep and b in to_keep:
self.assertEqual(distances[(a,b)], round(a.get_distance(b), 6))
# Total number of nodes is correct (no single child nodes)
for x in range(10):
t_fuzzy = Tree("(((A,B)1, C)2,(D,E)3)root;", format=1)
t_fuzzy.sort_descendants()
orig_nw = t_fuzzy.write()
ref_nodes = t_fuzzy.get_leaves()
t_fuzzy.populate(10)
(t_fuzzy&'1').populate(3)
(t_fuzzy&'2').populate(5)
(t_fuzzy&'3').populate(5)
t_fuzzy.prune(ref_nodes)
t_fuzzy.sort_descendants()
self.assertEqual(orig_nw, t_fuzzy.write())
self.assertEqual(len(t_fuzzy.get_descendants()), (len(ref_nodes)*2)-2 )
# Total number of nodes is correct (no single child nodes)
t = Tree()
sample_size = 5
t.populate(1000)
sample = random.sample(t.get_leaves(), sample_size)
t.prune(sample)
self.assertEqual(len(t), sample_size)
self.assertEqual(len(t.get_descendants()), (sample_size*2)-2 )
# Test preserve branch dist when pruning
t = Tree()
t.populate(100, random_branches=True)
orig_leaves = t.get_leaves()
sample_size = 50
sample= random.sample(t.get_leaves(), sample_size)
matrix1 = ["%f" %t.get_distance(a, b) for (a,b) in itertools.product(sample, sample)]
t.prune(sample, preserve_branch_length=True)
matrix2 = ["%f" %t.get_distance(a, b) for (a,b)in itertools.product(sample, sample)]
self.assertEqual(matrix1, matrix2)
self.assertEqual(len(t.get_descendants()), (sample_size*2)-2 )
def test_resolve_polytomies(self):
# resolve polytomy
t = Tree("((a,a,a,a), (b,b,b,(c,c,c)));")
t.resolve_polytomy()
t.ladderize()
self.assertEqual(t.write(format=9), "((a,(a,(a,a))),(b,(b,(b,(c,(c,c))))));")
t = Tree("((((a,a,a,a))), (b,b,b,(c,c,c)));")
t.standardize()
t.ladderize()
self.assertEqual(t.write(format=9), "((a,(a,(a,a))),(b,(b,(b,(c,(c,c))))));")
def test_common_ancestors(self):
# getting nodes, get_childs, get_sisters, get_tree_root,
# get_common_ancestor, get_nodes_by_name
# get_descendants_by_name, is_leaf, is_root
t = Tree("(((A,B),C)[&&NHX:tag=common],D)[&&NHX:tag=root:name=root];")
self.assertEqual(t.get_sisters(), [])
A = t.search_nodes(name="A")[0]
B = t.search_nodes(name="B")[0]
C = t.search_nodes(name="C")[0]
root = (t&"root")
self.assertEqual("A", A.name)
test_not_found = lambda: t&'noffound'
self.assertRaises(TreeError, test_not_found)
self.assertEqual("common", A.get_common_ancestor(C).tag)
self.assertEqual("common", A.get_common_ancestor([C]).tag)
self.assertEqual("common", t.get_common_ancestor(A, C).tag)
self.assertEqual("common", A.get_common_ancestor(C, B).tag)
self.assertEqual(root, t.get_common_ancestor([A, "D"]))
self.assertEqual("root", A.get_tree_root().tag)
self.assertEqual("root", B.get_tree_root().tag)
self.assertEqual("root", C.get_tree_root().tag)
common = A.get_common_ancestor(C)
self.assertEqual("root", common.get_tree_root().tag)
self.assert_(common.get_tree_root().is_root())
self.assert_(not A.is_root())
self.assert_(A.is_leaf())
self.assert_(not A.get_tree_root().is_leaf())
self.assertRaises(TreeError, A.get_common_ancestor, Tree())
def test_getters_iters(self):
# Iter ancestors
t = Tree("(((((a,b)A,c)B,d)C,e)D,f)root;", format=1)
ancestor_names = [n.name for n in (t&"a").get_ancestors()]
self.assertEqual(ancestor_names, ["A", "B", "C", "D", "root"])
ancestor_names = [n.name for n in (t&"B").get_ancestors()]
self.assertEqual(ancestor_names, ["C", "D", "root"])
# Tree magic python features
t = Tree(nw_dflt)
self.assertEqual(len(t), 20)
self.assert_("Ddi0002240" in t)
self.assert_(t.children[0] in t)
for a in t:
self.assert_(a.name)
# Populate
t = Tree(nw_full)
prev_size= len(t)
t.populate(25)
self.assertEqual(len(t), prev_size+25)
for i in range(10):
t = Tree()
t.populate(100, reuse_names=False)
# Checks that all names are actually unique
self.assertEqual(len(set(t.get_leaf_names())), 100)
# Adding and removing features
t = Tree("(((A,B),C)[&&NHX:tag=common],D)[&&NHX:tag=root];")
A = t.search_nodes(name="A")[0]
# Check that getters and iterators return the same results
t = Tree(nw2_full)
self.assertEqual(t.get_leaf_names(), [name for name in t.iter_leaf_names()])
self.assertEqual(t.get_leaves(), [n for n in t.iter_leaves()])
self.assertEqual(t.get_descendants(), [n for n in t.iter_descendants()])
self.assertEqual(set([n for n in t.traverse("preorder")]), \
set([n for n in t.traverse("postorder")]))
self.assert_(t in set([n for n in t.traverse("preorder")]))
# Check order of visiting nodes
t = Tree("((3,4)2,(6,7)5)1;", format=1)
#t = Tree("(((A, B)C, (D, E)F)G, (H, (I, J)K)L)M;", format=1)
#postorder = [c for c in "ABCDEFGHIJKLM"]
#preorder = [c for c in reversed(postorder)]
#levelorder = [c for c in "MGLCFHKABDEIJ"]
postorder = "3426751"
preorder = "1234567"
levelorder = "1253467"
self.assertEqual(preorder,
''.join([n.name for n in t.traverse("preorder")]))
self.assertEqual(postorder,
''.join([n.name for n in t.traverse("postorder")]))
self.assertEqual(levelorder,
''.join([n.name for n in t.traverse("levelorder")]))
# Swap children
n = t.get_children()
t.swap_children()
n.reverse()
self.assertEqual(n, t.get_children())
def test_distances(self):
# Distances: get_distance, get_farthest_node,
# get_farthest_leaf, get_midpoint_outgroup
t = Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=I], (D:0.00001):0.000001[&&NHX:name=J]):2.0[&&NHX:name=root];")
A = t.search_nodes(name="A")[0]
B = t.search_nodes(name="B")[0]
C = t.search_nodes(name="C")[0]
D = t.search_nodes(name="D")[0]
I = t.search_nodes(name="I")[0]
J = t.search_nodes(name="J")[0]
root = t.search_nodes(name="root")[0]
self.assertEqual(A.get_common_ancestor(I).name, "I")
self.assertEqual(A.get_common_ancestor(D).name, "root")
self.assertEqual(A.get_distance(I), 0.101)
self.assertEqual(A.get_distance(B), 0.11)
self.assertEqual(A.get_distance(A), 0)
self.assertEqual(I.get_distance(I), 0)
self.assertEqual(A.get_distance(root), root.get_distance(A))
self.assertEqual(t.get_distance(A, root), root.get_distance(A))
self.assertEqual(t.get_distance(root, A), A.get_distance(root))
# Get_farthest_node, get_farthest_leaf
self.assertEqual(root.get_farthest_leaf(), (A,1.101) )
self.assertEqual(root.get_farthest_node(), (A,1.101) )
self.assertEqual(A.get_farthest_leaf(), (A, 0.0))
self.assertEqual(A.get_farthest_node(), (D, 1.101011))
self.assertEqual(I.get_farthest_node(), (D, 1.000011))
# Topology only distances
t = Tree('(((A:0.5, B:1.0):1.0, C:5.0):1, (D:10.0, F:1.0):2.0):20;')
self.assertEqual(t.get_closest_leaf(), (t&'A', 2.5))
self.assertEqual(t.get_farthest_leaf(), (t&'D', 12.0))
self.assertEqual(t.get_farthest_leaf(topology_only=True), (t&'A', 2.0))
self.assertEqual(t.get_closest_leaf(topology_only=True), (t&'C', 1.0))
self.assertEqual(t.get_distance(t), 0.0)
self.assertEqual(t.get_distance(t, topology_only=True), 0.0)
self.assertEqual(t.get_distance(t&'A', topology_only=True), 2.0)
self.assertEqual((t&'F').get_farthest_node(topology_only=True), (t&'A', 3.0))
self.assertEqual((t&'F').get_farthest_node(topology_only=False), (t&'D', 11.0))
def test_rooting(self):
# Test set_outgroup and get_midpoint_outgroup
t = Tree(nw2_full)
YGR028W = t.get_leaves_by_name("YGR028W")[0]
YGR138C = t.get_leaves_by_name("YGR138C")[0]
d1 = YGR138C.get_distance(YGR028W)
nodes = t.get_descendants()
t.set_outgroup(t.get_midpoint_outgroup())
o1, o2 = t.children[0], t.children[1]
nw_original = t.write()
d2 = YGR138C.get_distance(YGR028W)
self.assertEqual(d1, d2)
# Randomizing outgroup test: Can we recover original state
# after many manipulations?
for i in range(10):
for j in range(1000):
n = random.sample(nodes, 1)[0]
t.set_outgroup(n)
t.set_outgroup(t.get_midpoint_outgroup())
self.assertEqual(set([t.children[0], t.children[1]]), set([o1, o2]))
## I need to sort branches first
#self.assertEqual(t.write(), nw_original)
d3 = YGR138C.get_distance(YGR028W)
self.assertEqual(d1, d3)
t = Tree('(A,B,(C,D)E)root;', format=1);
t.sort_descendants()
nw_unrooted = t.write()
t.set_outgroup(t.get_common_ancestor('C', 'D'));
t.unroot()
t.sort_descendants()
self.assertEqual(nw_unrooted, t.write())
t = Tree('(A:10,B:1,(C:1,D:1)E:1)root;', format=1);
t.set_outgroup(t.get_midpoint_outgroup())
self.assertEqual(t.children[0].dist, 5.0)
self.assertEqual(t.children[1].dist, 5.0)
def test_unroot(self):
t = Tree("(('a':0.5, 'b':0.5):0.5, ('c':0.2, 'd':0.2):0.8):1;" )
t2 = Tree("(('a':0.5, 'b':0.5):0.5, ('c':0.2, 'd':0.2):0.8):1;" )
t.unroot(mode="keep")
with self.assertRaises(ValueError):
t.unroot(mode="new")
t2.unroot(mode="legacy")
self.assertEqual("(('c':0.2,'d':0.2)1:1.3,'a':0.5,'b':0.5);", t.write())
self.assertEqual("(('c':0.2,'d':0.2)1:0.8,'a':0.5,'b':0.5);", t2.write())
def test_tree_navigation(self):
t = Tree("(((A, B)H, C)I, (D, F)J)root;", format=1)
postorder = [n.name for n in t.traverse("postorder")]
preorder = [n.name for n in t.traverse("preorder")]
levelorder = [n.name for n in t.traverse("levelorder")]
self.assertEqual(postorder, ['A', 'B', 'H', 'C', 'I', 'D', 'F', 'J', 'root'])
self.assertEqual(preorder, ['root', 'I', 'H', 'A', 'B', 'C', 'J', 'D', 'F'])
self.assertEqual(levelorder, ['root', 'I', 'J', 'H', 'C', 'D', 'F', 'A', 'B'])
ancestors = [n.name for n in (t&"B").get_ancestors()]
self.assertEqual(ancestors, ["H", "I", "root"])
self.assertEqual(t.get_ancestors(), [])
# test custom is_leaf_fn behavior
custom_test = lambda x: x.name in set("JCH")
custom_leaves = t.get_leaves(is_leaf_fn=custom_test)
self.assertEqual(set([n.name for n in custom_leaves]), set("JHC"))
# Test cached content
t = Tree()
t.populate(20)
cache_node = t.get_cached_content()
cache_node_leaves_only_false = t.get_cached_content(leaves_only=False)
self.assertEqual(cache_node[t], set(t.get_leaves()))
self.assertEqual(cache_node_leaves_only_false[t], set(t.traverse()))
cache_name = t.get_cached_content(store_attr="name")
cache_name_leaves_only_false = t.get_cached_content(store_attr="name", leaves_only=False)
self.assertEqual(cache_name[t], set(t.get_leaf_names()))
self.assertEqual(cache_name_leaves_only_false[t], set([n.name for n in t.traverse()]))
cache_many = t.get_cached_content(store_attr=["name", "dist", "support"])
cache_many_lof = t.get_cached_content(store_attr=["name", "dist", "support"], leaves_only=False)
self.assertEqual(cache_many[t], set([(leaf.name, leaf.dist, leaf.support) for leaf in t.get_leaves()]))
self.assertEqual(cache_many_lof[t], set([(n.name, n.dist, n.support) for n in t.traverse()]))
#self.assertEqual(cache_name_lof[t], [t.name])
def test_rooting_branch_support(self):
""" Check branch support and distances after rooting """
t = Tree("((((a,b)1,c)2,i)3,(e,d)4)5;", format=1)
t.set_outgroup(t&"a")
t = Tree("(((a,b)2,c)x)9;", format=1)
t.set_outgroup(t&"a")
# Test branch support and distances after rooting
SIZE = 35
t = Tree()
t.populate(SIZE, reuse_names=False)
t.unroot()
for n in t.iter_descendants():
if n is not t:
n.support = random.random()
n.dist = random.random()
for n in t.children:
n.support = 0.999
t2 = t.copy()
names = set(t.get_leaf_names())
cluster_id2support = {}
cluster_id2dist = {}
for n in t.traverse():
cluster_names = set(n.get_leaf_names())
cluster_names2 = names - cluster_names
cluster_id = '_'.join(sorted(cluster_names))
cluster_id2 = '_'.join(sorted(cluster_names2))
cluster_id2support[cluster_id] = n.support
cluster_id2support[cluster_id2] = n.support
cluster_id2dist[cluster_id] = n.dist
cluster_id2dist[cluster_id2] = n.dist
for i in range(100):
outgroup = random.sample(t2.get_descendants(), 1)[0]
t2.set_outgroup(outgroup)
for n in t2.traverse():
cluster_names = set(n.get_leaf_names())
cluster_names2 = names - cluster_names
cluster_id = '_'.join(sorted(cluster_names))
cluster_id2 = '_'.join(sorted(cluster_names2))
self.assertEqual(cluster_id2support.get(cluster_id, None), n.support)
self.assertEqual(cluster_id2support.get(cluster_id2, None), n.support)
if n.up and n.up.up:
self.assertEqual(cluster_id2dist.get(cluster_id, None), n.dist)
# Test unrooting
t = Tree()
t.populate(20)
t.unroot()
# Printing and info
text = t.get_ascii()
Tree().describe()
Tree('(a,b,c);').describe()
Tree('(a,(b,c));').describe()
def test_treeid(self):
t = Tree()
t.populate(50, random_branches=True)
orig_id = t.get_topology_id()
nodes = t.get_descendants()
for i in range(20):
for n in random.sample(nodes, 10):
n.swap_children()
self.assertEqual(t.get_topology_id(), orig_id)
def test_ultrametric(self):
# Convert tree to a ultrametric topology in which distance from
# leaf to root is always 100. Two strategies are available:
# balanced or fixed
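# e.g. (illustrative) t.convert_to_ultrametric(100, "balanced") rescales branch
# lengths so that every leaf ends up exactly 100 units from the root, which is
# what the assertions below check.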
t = Tree()
t.populate(100, random_branches=True)
t.convert_to_ultrametric(100, "balanced")
self.assertEqual(set([round(t.get_distance(n), 6) for n in t]), set([100.0]))
t = Tree()
t.populate(100, random_branches=True)
t.convert_to_ultrametric(100, "fixed")
self.assertEqual(set([round(t.get_distance(n), 6) for n in t]), set([100.0]))
t = Tree()
t.populate(100, random_branches=True)
t.convert_to_ultrametric(100, "balanced")
self.assertEqual(set([round(t.get_distance(n), 6) for n in t]), set([100.0]))
def test_expand_polytomies_rf(self):
gtree = Tree('((a:1, (b:1, (c:1, d:1):1):1), (e:1, (f:1, g:1):1):1);')
ref1 = Tree('((a:1, (b:1, c:1, d:1):1):1, (e:1, (f:1, g:1):1):1);')
ref2 = Tree('((a:1, (b:1, c:1, d:1):1):1, (e:1, f:1, g:1):1);')
for ref in [ref1, ref2]:
#print gtree, ref
gtree.robinson_foulds(ref, expand_polytomies=True)[0]
gtree = Tree('((g, h), (a, (b, (c, (d,( e, f))))));')
ref3 = Tree('((a, b, c, (d, e, f)), (g, h));')
ref4 = Tree('((a, b, c, d, e, f), (g, h));')
ref5 = Tree('((a, b, (c, d, (e, f))), (g, h));')
for ref in [ref3, ref4, ref5]:
#print gtree, ref
gtree.robinson_foulds(ref, expand_polytomies=True, polytomy_size_limit=8)[0]
gtree = Tree('((g, h), (a, b, (c, d, (e, f))));')
ref6 = Tree('((a, b, (c, d, e, f)), (g, h));')
ref7 = Tree('((a, (b, (c, d, e, f))), (g, h));')
ref8 = Tree('((a, b, c, (d, e, f)), (g, h));')
ref9 = Tree('((d, b, c, (a, e, f)), (g, h));')
for ref in [ref6, ref7, ref8, ref9]:
#print gtree, ref
gtree.robinson_foulds(ref, expand_polytomies=True)[0]
#print "REF GOOD", gtree.robinson_foulds(ref, expand_polytomies=True, polytomy_size_limit=8)[0]
gtree = Tree('((g, h), ((a, b), (c, d), (e, f)));')
ref10 = Tree('((g, h), ((a, c), ((b, d), (e, f))));')
for ref in [ref10]:
#print gtree, ref
gtree.robinson_foulds(ref, expand_polytomies=True, polytomy_size_limit=8)[0]
def test_tree_compare(self):
def _astuple(d):
keynames = ["norm_rf", "rf", "max_rf", "ref_edges_in_source", "source_edges_in_ref", "effective_tree_size", "source_subtrees", "treeko_dist"]
# print
# print "ref", len(d["ref_edges"])
# print "src", len(d["source_edges"])
# print "common", len(d["common_edges"]), d['common_edges']
# print d["rf"], d["max_rf"]
return tuple([d[v] for v in keynames])
ref1 = Tree('((((A, B)0.91, (C, D))0.9, (E,F)0.96), (G, H));')
ref2 = Tree('(((A, B)0.91, (C, D))0.9, (E,F)0.96);')
s1 = Tree('(((A, B)0.9, (C, D))0.9, (E,F)0.9);')
small = Tree("((A, B), C);")
# RF unrooted on trees too small for RF, but with at least one internal node
self.assertEqual(_astuple(small.compare(ref1, unrooted=True)),
("NA", "NA", 0.0, 1.0, 1.0, 3, 1, "NA"))
small = Tree("(A, B);")
# RF unrooted in too small trees
self.assertEqual(_astuple(small.compare(ref1, unrooted=True)),
("NA", "NA", 0.0, "NA", "NA", 2, 1, "NA"))
small = Tree("(A, B);")
# RF unrooted in too small trees
self.assertEqual(_astuple(small.compare(ref1, unrooted=False)),
("NA", "NA", 0.0, "NA", "NA", 2, 1, "NA"))
# identical trees, 8 rooted partitions in total (4 and 4), and 6 unrooted
self.assertEqual(_astuple(s1.compare(ref1)),
(0.0, 0.0, 8, 1.0, 1.0, 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, unrooted=True)),
(0.0, 0.0, 6, 1.0, 1.0, 6, 1, "NA"))
# The same stats should be returned when discarding branches, as the topology
# is still identical, but the branches used should be different
self.assertEqual(_astuple(s1.compare(ref1, min_support_source=0.99, min_support_ref=.99)),
(0.0, 0.0, 2, 1.0, 1.0, 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, min_support_source=0.99, min_support_ref=.99, unrooted=True)),
(0.0, 0.0, 2, 1.0, 1.0, 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, min_support_source=0.99)),
(0.0, 0.0, 5, 1/4., 1.0, 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, min_support_source=0.99, unrooted=True)),
(0.0, 0.0, 4, 6/8., 1.0, 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, min_support_ref=0.99)),
(0.0, 0.0, 5, 1.0, 1/4., 6, 1, "NA"))
self.assertEqual(_astuple(s1.compare(ref1, min_support_ref=0.99, unrooted=True)),
(0.0, 0.0, 4, 1.0, 6/8., 6, 1, "NA"))
# Three partitions different
s2 = Tree('(((A, E)0.9, (C, D))0.98, (B,F)0.95);')
self.assertEqual(_astuple(s2.compare(ref1)),
(6/8., 6, 8, 1/4., 1/4., 6, 1, "NA"))
self.assertEqual(_astuple(s2.compare(ref1, unrooted=True)),
(4/6., 4, 6, 6/8., 6/8., 6, 1, "NA"))
# let's discard one branch from the source tree. There are 4 valid edges in
# ref and 3 in source; there are only 2 edges in common, CD and the root (which
# should be discounted for the % of found branches)
self.assertEqual(_astuple(s2.compare(ref1, min_support_source=0.95)),
(5/7., 5, 7, 1/4., 1/3., 6, 1, "NA"))
# similar in unrooted mode, but we do not need to discount root edges
self.assertEqual(_astuple(s2.compare(ref1, min_support_source=0.95, unrooted=True)),
(3/5., 3, 5, 6/8., 6/7., 6, 1, "NA"))
# totally different trees
s3 = Tree('(((A, C)0.9, (E, D))0.98, (B,F)0.95);')
self.assertEqual(_astuple(s3.compare(ref1)),
(1.0, 8, 8, 0.0, 0.0, 6, 1, "NA"))
def test_tree_diff(self):
# this is the result of 100 Ktreedist runs on random trees, using rooted
# and unrooted topologies. ETE should provide the same RF result
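# Each sample below is (expected_rf, unrooted_flag, newick1, newick2), matching
# the unpacking "for RF, unrooted, nw1, nw2 in samples" used further down.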
samples = [
[28, True, '(((z,y),(x,(w,v))),(u,t),((s,r),((q,(p,o)),((n,(m,(l,(k,j)))),(i,(h,g))))));', '(((k,(j,(i,(h,g)))),z),(y,x),((w,v),((u,(t,(s,(r,q)))),(p,(o,(n,(m,l)))))));'],
[28, False, '(((t,s),((r,(q,p)),(o,n))),(((m,(l,(k,j))),(i,(h,g))),(z,(y,(x,(w,(v,u)))))));', '((((k,(j,i)),((h,g),z)),((y,(x,w)),((v,(u,t)),(s,(r,(q,p)))))),((o,n),(m,l)));'],
[18, True, '(((v,(u,(t,s))),((r,(q,(p,o))),((n,m),(l,k)))),(j,(i,(h,g))),(z,(y,(x,w))));', '(((z,(y,(x,w))),(v,(u,(t,s)))),((r,(q,p)),(o,(n,m))),((l,(k,(j,i))),(h,g)));'],
[26, True, '(((l,k),(j,i)),((h,g),(z,(y,(x,w)))),((v,(u,(t,(s,(r,q))))),((p,o),(n,m))));', '(((p,o),((n,(m,l)),(k,j))),((i,(h,g)),(z,y)),((x,(w,v)),((u,(t,s)),(r,q))));'],
[24, True, '(((o,(n,m)),(l,(k,(j,(i,(h,g)))))),(z,(y,x)),((w,v),((u,(t,(s,r))),(q,p))));', '(((t,(s,(r,(q,(p,o))))),(n,m)),((l,k),(j,(i,(h,g)))),((z,y),((x,w),(v,u))));'],
[24, True, '(((y,(x,(w,v))),(u,t)),((s,(r,(q,(p,o)))),(n,m)),((l,k),((j,(i,(h,g))),z)));', '(((z,(y,(x,w))),(v,(u,t))),(s,(r,(q,(p,(o,(n,(m,(l,k)))))))),(j,(i,(h,g))));'],
[28, False, '(((p,(o,(n,(m,l)))),((k,(j,i)),(h,g))),((z,y),((x,(w,(v,u))),(t,(s,(r,q))))));', '((((t,(s,r)),(q,p)),((o,n),(m,(l,(k,(j,i)))))),(((h,g),(z,(y,(x,w)))),(v,u)));'],
[28, True, '((((i,(h,g)),z),(y,x)),((w,v),((u,(t,(s,r))),(q,p))),((o,n),(m,(l,(k,j)))));', '((((h,g),z),(y,x)),(w,(v,u)),((t,s),((r,(q,p)),((o,(n,m)),(l,(k,(j,i)))))));'],
[28, True, '(((x,(w,(v,(u,(t,(s,(r,(q,(p,o))))))))),((n,(m,l)),(k,(j,i)))),(h,g),(z,y));', '(((u,t),(s,r)),((q,p),(o,(n,m))),(((l,(k,(j,i))),((h,g),(z,(y,x)))),(w,v)));'],
[22, False, '(((x,(w,(v,u))),((t,(s,r)),(q,p))),((o,(n,(m,l))),((k,j),((i,(h,g)),(z,y)))));', '(((z,(y,(x,(w,(v,u))))),(t,(s,r))),((q,(p,(o,(n,m)))),((l,k),(j,(i,(h,g))))));'],
[26, True, '((z,(y,(x,w))),(v,(u,(t,s))),((r,(q,(p,(o,(n,m))))),((l,k),(j,(i,(h,g))))));', '(((v,(u,t)),((s,r),((q,(p,o)),(n,(m,l))))),((k,j),((i,(h,g)),z)),(y,(x,w)));'],
[34, False, '((((i,(h,g)),(z,(y,x))),(w,v)),((u,t),((s,r),((q,(p,(o,n))),(m,(l,(k,j)))))));', '(((p,(o,(n,(m,(l,k))))),((j,i),(h,g))),(z,(y,(x,(w,(v,(u,(t,(s,(r,q))))))))));'],
[30, False, '(((i,(h,g)),(z,y)),((x,w),((v,(u,(t,(s,(r,q))))),(p,(o,(n,(m,(l,(k,j)))))))));', '((((l,k),(j,(i,(h,g)))),(z,(y,(x,w)))),((v,u),((t,s),((r,(q,p)),(o,(n,m))))));'],
[26, False, '(((v,(u,t)),((s,(r,q)),((p,o),((n,m),((l,k),(j,i)))))),((h,g),(z,(y,(x,w)))));', '(((y,(x,(w,v))),(u,(t,s))),(((r,q),((p,o),(n,(m,(l,k))))),((j,i),((h,g),z))));'],
[20, False, '(((u,(t,s)),(r,q)),(((p,o),((n,m),((l,k),((j,i),((h,g),z))))),(y,(x,(w,v)))));', '((((u,t),(s,r)),(((q,p),(o,(n,m))),(((l,k),(j,i)),((h,g),z)))),((y,x),(w,v)));'],
[20, True, '(((y,x),(w,v)),((u,(t,s)),((r,q),(p,(o,(n,(m,(l,k))))))),((j,(i,(h,g))),z));', '(((r,q),((p,o),(n,(m,(l,(k,j)))))),((i,(h,g)),(z,(y,(x,(w,v))))),(u,(t,s)));'],
[24, True, '((((k,(j,i)),(h,g)),((z,(y,(x,w))),((v,(u,t)),(s,r)))),(q,(p,(o,n))),(m,l));', '((((s,r),((q,p),(o,(n,m)))),((l,k),((j,i),((h,g),z)))),(y,x),(w,(v,(u,t))));'],
[18, True, '((w,(v,(u,(t,s)))),(r,q),((p,(o,n)),((m,(l,k)),((j,(i,(h,g))),(z,(y,x))))));', '(((y,x),((w,v),(u,(t,s)))),((r,(q,(p,(o,n)))),(m,l)),((k,j),((i,(h,g)),z)));'],
[26, True, '(((j,(i,(h,g))),(z,(y,(x,(w,(v,(u,t))))))),(s,r),((q,p),((o,(n,m)),(l,k))));', '(((s,(r,(q,(p,(o,(n,(m,l))))))),(k,j)),((i,(h,g)),(z,y)),((x,(w,v)),(u,t)));'],
[30, True, '((((r,(q,(p,(o,n)))),((m,l),(k,(j,i)))),((h,g),z)),(y,(x,(w,v))),(u,(t,s)));', '(((u,t),(s,r)),((q,p),(o,(n,(m,(l,(k,j)))))),(((i,(h,g)),(z,(y,x))),(w,v)));'],
[30, False, '((((m,(l,k)),(j,i)),(((h,g),(z,y)),(x,w))),((v,u),(t,(s,(r,(q,(p,(o,n))))))));', '(((u,t),((s,(r,q)),(p,(o,(n,(m,(l,k))))))),((j,(i,(h,g))),(z,(y,(x,(w,v))))));'],
[22, False, '(((k,(j,i)),(h,g)),((z,(y,x)),((w,(v,(u,(t,(s,r))))),((q,(p,(o,n))),(m,l)))));', '(((w,(v,u)),((t,(s,r)),((q,p),((o,(n,(m,l))),((k,(j,i)),((h,g),z)))))),(y,x));'],
[26, False, '(((x,(w,(v,(u,(t,s))))),(r,q)),((p,(o,(n,(m,l)))),((k,j),((i,(h,g)),(z,y)))));', '(((o,(n,m)),(l,(k,j))),(((i,(h,g)),(z,y)),((x,w),((v,u),((t,(s,r)),(q,p))))));'],
[28, True, '(((x,(w,v)),(u,(t,s))),((r,(q,(p,(o,(n,m))))),(l,(k,(j,(i,(h,g)))))),(z,y));', '((((i,(h,g)),(z,(y,x))),((w,v),((u,t),(s,(r,(q,p)))))),(o,n),((m,l),(k,j)));'],
[20, False, '((((m,l),(k,(j,(i,(h,g))))),(z,y)),((x,(w,(v,(u,(t,s))))),(r,(q,(p,(o,n))))));', '((((m,l),((k,(j,i)),(h,g))),(z,(y,(x,(w,v))))),((u,t),(s,(r,(q,(p,(o,n)))))));'],
[26, True, '(((o,(n,(m,(l,k)))),(j,i)),((h,g),(z,y)),((x,(w,(v,(u,(t,s))))),(r,(q,p))));', '((((t,(s,(r,(q,(p,(o,n)))))),(m,(l,k))),((j,i),(h,g))),(z,(y,x)),(w,(v,u)));'],
[22, False, '((((p,o),((n,m),((l,k),(j,i)))),((h,g),(z,y))),((x,(w,(v,u))),((t,s),(r,q))));', '((((v,(u,(t,s))),(r,q)),((p,o),((n,m),(l,k)))),(((j,i),(h,g)),(z,(y,(x,w)))));'],
[28, False, '((((r,(q,(p,(o,n)))),(m,(l,k))),(((j,i),(h,g)),((z,y),(x,w)))),((v,u),(t,s)));', '((((k,j),((i,(h,g)),(z,y))),(x,w)),(((v,(u,t)),(s,r)),((q,p),((o,n),(m,l)))));'],
[20, True, '((((q,(p,o)),(n,m)),((l,k),((j,i),(h,g)))),(z,(y,x)),((w,v),(u,(t,(s,r)))));', '((((l,(k,(j,i))),(h,g)),((z,y),(x,(w,v)))),(u,t),((s,(r,(q,(p,o)))),(n,m)));'],
[28, False, '(((t,(s,r)),(q,(p,o))),(((n,(m,(l,k))),(j,(i,(h,g)))),((z,y),(x,(w,(v,u))))));', '(((w,(v,u)),(t,s)),(((r,(q,p)),(o,n)),(((m,l),((k,j),((i,(h,g)),z))),(y,x))));'],
[24, True, '((((h,g),(z,y)),((x,(w,(v,u))),(t,(s,(r,q))))),(p,o),((n,m),((l,k),(j,i))));', '(((t,s),((r,(q,p)),((o,(n,(m,l))),((k,j),(i,(h,g)))))),(z,y),(x,(w,(v,u))));'],
[20, True, '(((p,o),(n,(m,(l,(k,(j,i)))))),((h,g),z),((y,(x,w)),((v,u),(t,(s,(r,q))))));', '(((y,(x,w)),(v,(u,t))),((s,r),(q,p)),((o,(n,m)),((l,(k,(j,i))),((h,g),z))));'],
[32, True, '((((s,(r,q)),((p,(o,n)),(m,(l,k)))),((j,(i,(h,g))),(z,y))),(x,w),(v,(u,t)));', '(((u,(t,(s,r))),((q,(p,o)),((n,(m,l)),(k,(j,i))))),((h,g),(z,(y,x))),(w,v));'],
[26, True, '(((z,(y,x)),(w,(v,(u,t)))),(s,(r,(q,(p,(o,n))))),((m,l),(k,(j,(i,(h,g))))));', '(((u,t),((s,r),((q,p),((o,n),((m,(l,k)),((j,i),((h,g),z))))))),(y,x),(w,v));'],
[10, True, '(((p,o),((n,m),((l,(k,(j,i))),((h,g),(z,y))))),(x,(w,(v,u))),((t,s),(r,q)));', '((((n,m),((l,(k,(j,i))),((h,g),(z,y)))),(x,w)),(v,(u,(t,(s,(r,q))))),(p,o));'],
[30, True, '((((h,g),z),((y,x),((w,v),(u,t)))),(s,r),((q,p),((o,n),((m,l),(k,(j,i))))));', '((((v,(u,(t,(s,r)))),(q,(p,o))),((n,m),((l,k),(j,(i,(h,g)))))),(z,y),(x,w));'],
[30, False, '(((q,(p,o)),((n,m),((l,(k,(j,(i,(h,g))))),(z,y)))),((x,(w,v)),(u,(t,(s,r)))));', '((((t,s),((r,q),((p,o),(n,m)))),((l,k),(j,i))),(((h,g),z),((y,(x,w)),(v,u))));'],
[24, False, '(((p,o),(n,m)),(((l,(k,(j,i))),(h,g)),((z,y),((x,w),((v,u),(t,(s,(r,q))))))));', '((x,(w,v)),((u,(t,(s,(r,q)))),((p,(o,(n,(m,(l,(k,(j,(i,(h,g))))))))),(z,y))));'],
[28, False, '(((z,y),((x,w),((v,u),(t,s)))),((r,(q,(p,(o,(n,m))))),((l,k),((j,i),(h,g)))));', '((((s,(r,q)),((p,o),((n,(m,l)),(k,(j,(i,(h,g))))))),(z,y)),((x,w),(v,(u,t))));'],
[24, False, '((((o,n),((m,l),((k,(j,i)),(h,g)))),(z,(y,x))),((w,(v,(u,(t,(s,r))))),(q,p)));', '(((q,(p,(o,(n,m)))),((l,(k,j)),(i,(h,g)))),(z,(y,(x,(w,(v,(u,(t,(s,r)))))))));'],
[22, True, '(((p,(o,(n,m))),((l,k),((j,i),((h,g),(z,y))))),(x,w),((v,u),((t,s),(r,q))));', '(((u,(t,(s,(r,(q,(p,(o,(n,m)))))))),((l,k),((j,i),((h,g),(z,(y,x)))))),w,v);'],
[28, False, '((((r,q),((p,o),(n,(m,l)))),((k,(j,i)),(h,g))),((z,y),((x,(w,v)),(u,(t,s)))));', '(((h,g),z),((y,x),((w,v),((u,t),((s,(r,(q,(p,(o,(n,m)))))),(l,(k,(j,i))))))));'],
[30, True, '((((h,g),z),((y,(x,(w,(v,u)))),((t,s),((r,(q,(p,o))),(n,m))))),(l,k),(j,i));', '((((o,n),((m,(l,(k,j))),((i,(h,g)),z))),(y,(x,(w,v)))),(u,(t,s)),(r,(q,p)));'],
[30, True, '(((v,u),(t,(s,(r,(q,p))))),((o,(n,m)),((l,(k,j)),((i,(h,g)),z))),(y,(x,w)));', '((((m,(l,k)),((j,i),(h,g))),(z,y)),(x,w),((v,(u,(t,(s,(r,q))))),(p,(o,n))));'],
[26, True, '(((q,p),((o,(n,(m,l))),(k,(j,i)))),((h,g),z),((y,x),((w,(v,(u,t))),(s,r))));', '((((j,(i,(h,g))),(z,(y,x))),((w,v),(u,t))),(s,(r,q)),((p,o),(n,(m,(l,k)))));'],
[20, False, '((((o,(n,m)),((l,k),((j,i),((h,g),z)))),(y,x)),(((w,v),(u,t)),((s,r),(q,p))));', '((((j,i),((h,g),z)),((y,x),(w,(v,(u,(t,(s,r))))))),((q,p),((o,n),(m,(l,k)))));'],
[30, False, '(((x,w),(v,(u,(t,(s,(r,(q,(p,(o,(n,m)))))))))),((l,k),((j,(i,(h,g))),(z,y))));', '(((m,l),((k,(j,(i,(h,g)))),z)),((y,(x,(w,(v,(u,t))))),((s,r),((q,p),(o,n)))));'],
[32, True, '((((y,x),(w,v)),((u,(t,(s,r))),(q,(p,o)))),((n,m),(l,(k,j))),((i,(h,g)),z));', '(((m,l),(k,(j,i))),((h,g),z),((y,(x,w)),((v,u),((t,s),(r,(q,(p,(o,n))))))));'],
[28, True, '(((v,u),((t,(s,(r,(q,p)))),((o,n),((m,l),(k,(j,(i,(h,g)))))))),(z,y),(x,w));', '((((n,m),((l,k),((j,i),((h,g),(z,(y,(x,(w,(v,u))))))))),(t,s)),(r,q),(p,o));'],
[32, False, '(((r,(q,p)),(o,n)),(((m,(l,k)),(j,i)),(((h,g),(z,y)),((x,w),((v,u),(t,s))))));', '(((y,x),((w,v),(u,(t,(s,r))))),(((q,(p,(o,n))),(m,l)),((k,(j,(i,(h,g)))),z)));'],
[20, True, '(((w,v),((u,(t,(s,r))),((q,p),((o,(n,(m,l))),((k,j),((i,(h,g)),z)))))),y,x);', '(((w,v),((u,t),(s,(r,q)))),((p,o),((n,(m,l)),(k,j))),((i,(h,g)),(z,(y,x))));'],
[24, False, '(((x,(w,v)),((u,(t,s)),(r,q))),(((p,o),((n,(m,l)),(k,j))),((i,(h,g)),(z,y))));', '((((i,(h,g)),z),((y,x),(w,v))),((u,(t,s)),((r,(q,(p,(o,(n,m))))),(l,(k,j)))));'],
[22, False, '((((k,(j,(i,(h,g)))),(z,(y,x))),((w,v),(u,t))),((s,(r,(q,(p,o)))),(n,(m,l))));', '(((w,v),(u,(t,(s,(r,(q,(p,o))))))),(((n,m),((l,(k,(j,i))),((h,g),z))),(y,x)));'],
[28, True, '(((x,w),((v,u),((t,s),(r,(q,p))))),((o,n),(m,l)),((k,(j,i)),((h,g),(z,y))));', '((((p,o),(n,m)),((l,(k,(j,i))),((h,g),z))),(y,(x,(w,v))),((u,t),(s,(r,q))));'],
[30, False, '(((q,p),((o,(n,(m,l))),((k,(j,(i,(h,g)))),z))),((y,x),((w,(v,u)),(t,(s,r)))));', '((((m,(l,k)),((j,(i,(h,g))),z)),(y,(x,w))),((v,(u,(t,(s,(r,q))))),(p,(o,n))));'],
[30, False, '(((y,x),((w,(v,(u,(t,(s,r))))),(q,p))),((o,(n,(m,(l,(k,(j,i)))))),((h,g),z)));', '((((t,(s,(r,q))),((p,(o,(n,(m,l)))),((k,(j,i)),(h,g)))),(z,y)),((x,w),(v,u)));'],
[20, False, '(((u,(t,s)),(r,(q,(p,(o,(n,(m,(l,(k,j))))))))),(((i,(h,g)),z),(y,(x,(w,v)))));', '(((o,n),(m,(l,(k,j)))),(((i,(h,g)),(z,y)),((x,(w,v)),((u,(t,(s,r))),(q,p)))));'],
[26, False, '(((t,s),((r,(q,(p,(o,n)))),(m,(l,k)))),(((j,i),((h,g),z)),((y,(x,w)),(v,u))));', '(((r,(q,(p,o))),((n,(m,(l,k))),((j,i),(h,g)))),((z,(y,(x,(w,v)))),(u,(t,s))));'],
[28, True, '((((r,q),((p,(o,(n,(m,l)))),((k,(j,i)),(h,g)))),(z,(y,(x,w)))),(v,u),(t,s));', '(((x,(w,(v,(u,(t,s))))),(r,(q,(p,o)))),(n,m),((l,k),((j,(i,(h,g))),(z,y))));'],
[28, False, '(((t,s),((r,(q,p)),((o,n),(m,(l,(k,(j,i))))))),(((h,g),(z,y)),(x,(w,(v,u)))));', '((((h,g),(z,(y,(x,(w,v))))),(u,(t,(s,r)))),((q,(p,(o,(n,m)))),(l,(k,(j,i)))));'],
[26, True, '((((q,(p,o)),((n,m),((l,(k,(j,i))),(h,g)))),(z,(y,x))),(w,v),(u,(t,(s,r))));', '(((y,x),(w,(v,u))),((t,(s,r)),((q,p),(o,n))),((m,(l,k)),((j,(i,(h,g))),z)));'],
[28, False, '((((q,(p,(o,n))),((m,(l,k)),((j,(i,(h,g))),z))),(y,x)),((w,(v,(u,t))),(s,r)));', '(((z,(y,x)),(w,v)),(((u,t),((s,(r,(q,p))),((o,n),(m,l)))),((k,(j,i)),(h,g))));'],
[22, True, '(((x,w),((v,(u,(t,s))),(r,q))),((p,(o,n)),((m,(l,k)),(j,(i,(h,g))))),(z,y));', '((((j,(i,(h,g))),(z,(y,x))),(w,(v,u))),((t,s),((r,q),(p,o))),((n,m),(l,k)));'],
[26, False, '((((n,(m,l)),(k,j)),(((i,(h,g)),(z,y)),((x,w),((v,u),(t,s))))),((r,q),(p,o)));', '(((v,u),(t,s)),(((r,(q,(p,(o,n)))),((m,(l,k)),(j,i))),((h,g),(z,(y,(x,w))))));'],
[32, False, '((((n,(m,(l,(k,j)))),((i,(h,g)),z)),(y,x)),((w,v),((u,(t,(s,r))),(q,(p,o)))));', '((((v,u),(t,(s,(r,(q,p))))),((o,(n,(m,(l,k)))),(j,(i,(h,g))))),((z,y),(x,w)));'],
[20, False, '((((q,(p,(o,n))),(m,l)),((k,(j,(i,(h,g)))),z)),((y,(x,(w,(v,(u,t))))),(s,r)));', '(((w,(v,(u,t))),(s,r)),(((q,p),(o,n)),(((m,l),(k,(j,i))),((h,g),(z,(y,x))))));'],
[20, True, '(((z,(y,(x,w))),(v,u)),((t,(s,r)),(q,(p,o))),((n,(m,l)),((k,(j,i)),(h,g))));', '((((q,(p,(o,n))),(m,l)),((k,j),(i,(h,g)))),(z,y),((x,w),((v,u),(t,(s,r)))));'],
[34, False, '(((w,(v,(u,(t,(s,(r,q)))))),(p,o)),(((n,m),(l,(k,j))),((i,(h,g)),(z,(y,x)))));', '(((y,(x,(w,(v,u)))),(t,(s,r))),(((q,(p,(o,(n,(m,(l,k)))))),(j,i)),((h,g),z)));'],
[26, False, '(((y,x),(w,(v,(u,t)))),(((s,r),((q,(p,o)),(n,(m,l)))),((k,(j,(i,(h,g)))),z)));', '(((s,(r,(q,(p,o)))),(n,m)),(((l,k),((j,i),((h,g),(z,(y,(x,w)))))),(v,(u,t))));'],
[30, False, '(((v,(u,t)),((s,r),((q,p),((o,(n,(m,(l,k)))),(j,i))))),(((h,g),z),(y,(x,w))));', '(((y,(x,(w,v))),((u,(t,s)),(r,(q,(p,o))))),((n,(m,l)),((k,(j,i)),((h,g),z))));'],
[26, False, '(((y,x),(w,v)),(((u,t),((s,(r,(q,p))),(o,n))),((m,(l,k)),((j,i),((h,g),z)))));', '((((s,(r,q)),((p,(o,n)),((m,l),(k,(j,i))))),((h,g),z)),((y,(x,w)),(v,(u,t))));'],
[22, True, '(((w,v),(u,t)),((s,r),((q,p),((o,(n,m)),((l,k),((j,i),(h,g)))))),(z,(y,x)));', '(((z,y),(x,(w,(v,u)))),(t,(s,r)),((q,(p,o)),((n,m),((l,(k,(j,i))),(h,g)))));'],
[28, False, '(((y,x),(w,(v,(u,t)))),(((s,(r,q)),((p,o),(n,(m,(l,k))))),((j,i),((h,g),z))));', '((((i,(h,g)),(z,(y,x))),((w,(v,u)),(t,s))),((r,q),((p,o),((n,m),(l,(k,j))))));'],
[26, False, '(((v,(u,(t,s))),(r,(q,p))),(((o,n),((m,(l,(k,j))),((i,(h,g)),(z,y)))),(x,w)));', '(((q,p),((o,n),((m,l),((k,j),((i,(h,g)),z))))),(y,(x,(w,(v,(u,(t,(s,r))))))));'],
[26, True, '(((t,(s,(r,q))),((p,o),((n,(m,l)),((k,j),((i,(h,g)),z))))),(y,x),(w,(v,u)));', '(((z,y),(x,w)),(v,u),((t,(s,r)),((q,(p,(o,(n,(m,l))))),((k,(j,i)),(h,g)))));'],
[30, True, '(((w,(v,(u,(t,(s,r))))),(q,p)),((o,(n,m)),((l,k),(j,i))),(((h,g),z),(y,x)));', '((((p,o),(n,(m,(l,(k,(j,(i,(h,g)))))))),(z,(y,x))),(w,(v,u)),((t,s),(r,q)));'],
[26, True, '((((i,(h,g)),(z,y)),(x,w)),((v,u),((t,(s,r)),(q,p))),((o,n),(m,(l,(k,j)))));', '(((l,k),((j,i),((h,g),(z,y)))),(x,w),((v,u),((t,s),((r,(q,(p,o))),(n,m)))));'],
[26, False, '(((x,w),((v,(u,(t,s))),((r,(q,p)),((o,(n,(m,(l,k)))),((j,i),(h,g)))))),(z,y));', '(((p,(o,(n,m))),(l,k)),(((j,i),(h,g)),((z,y),((x,(w,v)),((u,t),(s,(r,q)))))));'],
[24, True, '(((x,w),((v,(u,t)),(s,r))),((q,p),(o,(n,(m,(l,k))))),((j,i),((h,g),(z,y))));', '(((h,g),(z,y)),(x,(w,(v,u))),((t,(s,r)),(q,(p,(o,(n,(m,(l,(k,(j,i))))))))));'],
[24, True, '(((y,x),(w,v)),((u,t),((s,r),((q,p),((o,n),(m,(l,k)))))),((j,(i,(h,g))),z));', '((((r,(q,p)),(o,(n,(m,(l,(k,(j,(i,(h,g))))))))),(z,y)),(x,(w,v)),(u,(t,s)));'],
[28, False, '(((y,(x,(w,v))),((u,t),((s,(r,q)),((p,(o,n)),((m,l),(k,(j,i))))))),((h,g),z));', '(((v,u),(t,(s,(r,(q,(p,(o,n))))))),(((m,l),((k,j),((i,(h,g)),z))),(y,(x,w))));'],
[26, True, '((((h,g),z),((y,x),((w,(v,u)),((t,(s,(r,q))),(p,(o,n)))))),(m,(l,k)),(j,i));', '((z,y),(x,(w,(v,(u,t)))),((s,r),((q,p),((o,n),((m,(l,k)),(j,(i,(h,g))))))));'],
[24, True, '(((u,t),(s,r)),((q,p),((o,n),((m,(l,(k,(j,(i,(h,g)))))),z))),(y,(x,(w,v))));', '((((j,(i,(h,g))),z),(y,x)),(w,(v,(u,t))),((s,(r,(q,p))),((o,(n,m)),(l,k))));'],
[30, True, '(((t,(s,r)),((q,p),((o,n),(m,(l,(k,j)))))),((i,(h,g)),z),((y,x),(w,(v,u))));', '((((w,(v,(u,t))),(s,(r,q))),((p,(o,(n,m))),(l,k))),((j,i),(h,g)),(z,(y,x)));'],
[30, False, '((((x,(w,v)),(u,t)),((s,(r,q)),(p,o))),(((n,m),((l,k),((j,i),(h,g)))),(z,y)));', '((r,q),((p,(o,n)),((m,(l,(k,(j,i)))),((h,g),(z,(y,(x,(w,(v,(u,(t,s)))))))))));'],
[28, True, '((((k,j),((i,(h,g)),(z,(y,x)))),(w,v)),(u,t),((s,(r,q)),(p,(o,(n,(m,l))))));', '(((z,y),(x,w)),(v,(u,(t,(s,(r,q))))),((p,o),((n,(m,(l,(k,(j,i))))),(h,g))));'],
[18, True, '(((t,s),((r,(q,(p,o))),(n,m))),((l,(k,j)),((i,(h,g)),(z,y))),((x,w),(v,u)));', '((((l,k),(j,i)),(((h,g),(z,y)),(x,w))),((v,u),(t,s)),((r,q),((p,o),(n,m))));'],
[26, True, '(((h,g),z),(y,(x,w)),((v,(u,(t,s))),((r,(q,p)),((o,(n,(m,l))),(k,(j,i))))));', '(((s,r),(q,p)),((o,n),(m,l)),(((k,j),((i,(h,g)),(z,(y,x)))),(w,(v,(u,t)))));'],
[30, True, '(((x,w),((v,(u,(t,(s,(r,(q,(p,(o,n)))))))),((m,(l,k)),((j,i),(h,g))))),z,y);', '((((h,g),z),(y,x)),((w,v),((u,(t,s)),(r,q))),((p,(o,(n,(m,l)))),(k,(j,i))));'],
[30, False, '(((v,(u,(t,(s,(r,q))))),((p,(o,(n,m))),((l,(k,(j,i))),(h,g)))),((z,y),(x,w)));', '(((v,u),((t,(s,(r,(q,(p,o))))),(n,(m,(l,(k,j)))))),((i,(h,g)),(z,(y,(x,w)))));'],
[22, True, '(((z,y),((x,(w,v)),((u,(t,(s,r))),(q,(p,o))))),(n,m),((l,k),(j,(i,(h,g)))));', '(((r,q),(p,(o,(n,m)))),((l,(k,(j,(i,(h,g))))),(z,y)),((x,w),(v,(u,(t,s)))));'],
[30, True, '(((x,w),((v,(u,(t,(s,r)))),(q,p))),((o,n),(m,l)),((k,j),((i,(h,g)),(z,y))));', '((((p,o),((n,(m,(l,k))),((j,i),(h,g)))),((z,y),(x,(w,v)))),(u,t),(s,(r,q)));'],
[32, False, '(((r,(q,p)),(o,(n,m))),(((l,(k,(j,i))),(h,g)),((z,(y,(x,(w,(v,u))))),(t,s))));', '((((j,(i,(h,g))),(z,y)),(x,(w,(v,(u,t))))),(((s,r),(q,(p,o))),((n,m),(l,k))));'],
[30, False, '((((q,p),((o,(n,(m,(l,k)))),((j,(i,(h,g))),(z,y)))),(x,w)),((v,u),(t,(s,r))));', '((((o,(n,m)),((l,(k,(j,i))),((h,g),z))),(y,x)),((w,v),((u,t),((s,r),(q,p)))));'],
[28, False, '((((s,r),((q,(p,o)),(n,(m,l)))),((k,(j,i)),(h,g))),((z,(y,x)),(w,(v,(u,t)))));', '(((m,l),(k,j)),(((i,(h,g)),z),((y,x),((w,(v,(u,(t,(s,r))))),((q,p),(o,n))))));'],
[20, True, '((((z,y),(x,(w,(v,u)))),((t,s),(r,q))),((p,o),(n,(m,l))),((k,(j,i)),(h,g)));', '(((j,i),(h,g)),(z,(y,x)),((w,(v,u)),((t,(s,(r,q))),((p,o),((n,m),(l,k))))));'],
[20, False, '(((v,u),((t,s),(r,q))),(((p,o),(n,(m,l))),(((k,(j,i)),((h,g),z)),(y,(x,w)))));', '((((s,(r,q)),(p,o)),(((n,(m,l)),(k,(j,i))),((h,g),z))),((y,x),((w,v),(u,t))));'],
[28, True, '((z,y),(x,w),((v,u),((t,(s,(r,q))),((p,(o,(n,m))),(l,(k,(j,(i,(h,g)))))))));', '((((r,q),((p,o),((n,m),((l,k),(j,i))))),((h,g),(z,(y,x)))),(w,v),(u,(t,s)));'],
[24, False, '((((k,(j,(i,(h,g)))),(z,y)),(x,(w,v))),(((u,t),(s,(r,q))),((p,o),(n,(m,l)))));', '(((w,v),(u,(t,s))),(((r,(q,(p,o))),((n,m),(l,(k,(j,(i,(h,g))))))),(z,(y,x))));'],
[24, True, '((((n,m),((l,(k,j)),(i,(h,g)))),(z,y)),(x,(w,v)),((u,(t,(s,(r,q)))),(p,o)));', '(((r,q),(p,o)),((n,(m,l)),((k,j),((i,(h,g)),z))),((y,x),(w,(v,(u,(t,s))))));']]
# test RF exceptions
t1 = Tree('(a,b,(c,d,e));')
t2 = Tree('((a,b),(c,d,e));')
# testing unrooted trees
self.assertRaises(TreeError, t1.robinson_foulds, t2=t2)
# expand polytomies and unrooted trees
self.assertRaises(TreeError, t1.robinson_foulds, t2=t2,
unrooted_trees=True, expand_polytomies=True)
# using expand_polytomies and correct_by_size at the same time
self.assertRaises(TreeError, t1.robinson_foulds, t2=t1,
unrooted_trees=True, expand_polytomies=True,
correct_by_polytomy_size=True)
# correct by size when there are polytomies on both sides
self.assertRaises(TreeError, t1.robinson_foulds, t2=t1,
unrooted_trees=True, correct_by_polytomy_size=True)
# polytomy larger than the default limit
self.assertRaises(TreeError, t2.robinson_foulds, t2=Tree('(a, (b,c,d,e,f,g,h));'),
expand_polytomies=True)
# duplicated items
t3 = Tree('(a, (b, (c, c)));')
self.assertRaises(TreeError, t3.robinson_foulds, t2=t2)
self.assertRaises(TreeError, t2.robinson_foulds, t2=t3)
# test RF using a known set of results
for RF, unrooted, nw1, nw2 in samples:
t1 = Tree(nw1)
t2 = Tree(nw2)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted)
real_max = (20*2) - 4 if not unrooted else (20*2) - 6
self.assertEqual(len(names), 20)
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
comp = t1.compare(t2, unrooted=unrooted)
self.assertEqual(20, comp['effective_tree_size'])
self.assertEqual(rf_max, comp['max_rf'])
self.assertEqual(RF, comp['rf'])
# Let's insert some random nodes, that should be ignored
for target in random.sample([n for n in t2.get_descendants() if not n.is_leaf()], 5):
target.populate(5)
comp = t1.compare(t2, unrooted=unrooted)
self.assertEqual(20, comp['effective_tree_size'])
self.assertEqual(rf_max, comp['max_rf'])
self.assertEqual(RF, comp['rf'])
# test treeko functionality
t = PhyloTree('((((A,B),C), ((A,B),C)), (((A,B),C), ((A,B),C)));')
ref = Tree('((A,B),C);')
comp = t.compare(ref, has_duplications=True)
#from pprint import pprint
#pprint(comp)
self.assertEqual(comp['effective_tree_size'], 3)
self.assertEqual(comp['treeko_dist'], 0.0)
self.assertEqual(comp['norm_rf'], 0.0)
self.assertEqual(comp['rf'], 0.0)
self.assertEqual(comp['max_rf'], 2)
self.assertEqual(comp['source_subtrees'], 4)
# test polytomy corrections
ref2 = Tree("((a:1, (b:1, c:1, d:1):1):1, (e:1, f:1, g:1):1);")
gtree = Tree("((a:1, (b:1, (c:1, d:1):1):1), (e:1, (f:1, g:1):1):1);")
# Basic polytomy
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref2)
self.assertEqual(rf, 2)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref2, expand_polytomies=True)
self.assertEqual(rf, 0)
# nested polytomies
gtree = Tree('((g, h), (a, (b, (c, (d,( e, f))))));')
ref3 = Tree('((a, b, c, (d, e, f)), (g, h));')
ref4 = Tree('((a, b, c, d, e, f), (g, h));')
ref5 = Tree('((a, b, (c, d, (e, f))), (g, h));')
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref3)
self.assertEqual(rf, 3)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref3, expand_polytomies=True)
self.assertEqual(rf, 0)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref4)
self.assertEqual(rf, 4)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref4, expand_polytomies=True,
polytomy_size_limit=6)
self.assertEqual(rf, 0)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref5)
self.assertEqual(rf, 2)
rf, max_rf, names, r1, r2, d1, d2 = gtree.robinson_foulds(ref5, expand_polytomies=True)
self.assertEqual(rf, 0)
# two side polytomies
t1 = Tree("((a:1, (b:1, c:1, d:1):1):1, (e:1, f:1, g:1):1);")
t2 = Tree("((a:1, (b:1, c:1, d:1):1), (e:1, (f:1, g:1):1):1);")
rf, max_rf, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, expand_polytomies=True)
self.assertEqual(rf, 0)
# test auto pruned tree topology
for RF, unrooted, nw1, nw2 in samples:
# Add fake tips in the newick
for x in "clanger":
nw1 = nw1.replace(x, "(%s,%s1)" %(x, x) )
nw2 = nw2.replace(x, "(%s,%s2)" %(x, x) )
t1 = Tree(nw1)
t2 = Tree(nw2)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted)
self.assertEqual(len(names), 20)
real_max = (20*2) - 4 if not unrooted else (20*2) - 6
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
#print 'Testing RF with branch support thresholds...'
# test discarding lowly supported branches
for RF, unrooted, nw1, nw2 in samples:
# Add fake internal nodes with low support
for x in "jlnqr":
nw1 = nw1.replace(x, "(%s,(%s1, %s11)0.6)" %(x, x, x) )
nw2 = nw2.replace(x, "(%s,(%s1, %s11)0.5)" %(x, x, x) )
t1 = Tree(nw1)
t2 = Tree(nw2)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted,
min_support_t1 = 0.1, min_support_t2 = 0.1)
self.assertEqual(len(names), 30)
real_max = (30*2) - 4 if not unrooted else (30*2) - 6
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted,
min_support_t1 = 0.0, min_support_t2 = 0.51)
self.assertEqual(len(names), 30)
real_max = (30*2) - 4 - 5 if not unrooted else (30*2) - 6 -5 # -5 to discount low support branches
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted,
min_support_t1 = 0.61, min_support_t2 = 0.0)
self.assertEqual(len(names), 30)
real_max = (30*2) - 4 - 5 if not unrooted else (30*2) - 6 -5 # -5 to discount low support branches
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
rf, rf_max, names, r1, r2, d1, d2 = t1.robinson_foulds(t2, unrooted_trees=unrooted,
min_support_t1 = 0.61, min_support_t2 = 0.51)
self.assertEqual(len(names), 30)
real_max = (30*2) - 4 - 10 if not unrooted else (30*2) - 6 -10 # -10 to discount low support branches
self.assertEqual(rf_max, real_max)
self.assertEqual(rf, RF)
def test_monophyly(self):
#print 'Testing monophyly checks...'
t = Tree("((((((a, e), i), o),h), u), ((f, g), j));")
is_mono, monotype, extra = t.check_monophyly(values=["a", "e", "i", "o", "u"], target_attr="name")
self.assertEqual(is_mono, False)
self.assertEqual(monotype, "polyphyletic")
is_mono, monotype, extra= t.check_monophyly(values=["a", "e", "i", "o"], target_attr="name")
self.assertEqual(is_mono, True)
self.assertEqual(monotype, "monophyletic")
is_mono, monotype, extra = t.check_monophyly(values=["i", "o"], target_attr="name")
self.assertEqual(is_mono, False)
self.assertEqual(monotype, "paraphyletic")
# Test examples
#print 'Testing monophyly check with unrooted trees'
t = PhyloTree('(aaa1, (aaa3, (aaa4, (bbb1, bbb2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['aaa']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, True)
self.assertEqual(extra, set())
t = PhyloTree('(aaa1, (bbb3, (aaa4, (bbb1, bbb2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['aaa']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, False)
self.assertEqual(extra, set([t&'bbb3']))
t = PhyloTree('(aaa1, (aaa3, (aaa4, (bbb1, bbb2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['bbb']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, True)
self.assertEqual(extra, set())
t = PhyloTree('(aaa1, (aaa3, (aaa4, (bbb1, ccc2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['bbb', 'ccc']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, True)
self.assertEqual(extra, set())
t = PhyloTree('(aaa1, (aaa3, (bbb4, (bbb1, bbb2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['bbb4', 'bbb2']), target_attr='name', unrooted=True)
self.assertEqual(is_mono, False)
self.assertEqual(extra, set([t&'bbb1']))
t = PhyloTree('(aaa1, (aaa3, (bbb4, (bbb1, bbb2))));')
is_mono, montype, extra = t.check_monophyly(values=set(['bbb1', 'bbb2']), target_attr='name', unrooted=True)
self.assertEqual(is_mono, True)
self.assertEqual(extra, set())
t = PhyloTree('(aaa1, aaa3, (aaa4, (bbb1, bbb2)));')
is_mono, montype, extra = t.check_monophyly(values=set(['aaa']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, True)
self.assertEqual(extra, set())
t = PhyloTree('(aaa1, bbb3, (aaa4, (bbb1, bbb2)));')
is_mono, montype, extra = t.check_monophyly(values=set(['aaa']), target_attr='species', unrooted=True)
self.assertEqual(is_mono, False)
self.assertEqual(extra, set([t&'bbb3']))
#print 'Check monophyly randomization test'
t = PhyloTree()
t.populate(100)
ancestor = t.get_common_ancestor(['aaaaaaaaaa', 'aaaaaaaaab', 'aaaaaaaaac'])
all_nodes = t.get_descendants()
# I test every possible node as root for the tree. The content of ancestor
# should always be detected as monophyletic
results = set()
for x in all_nodes:
mono, part, extra = t.check_monophyly(values=set(ancestor.get_leaf_names()), target_attr='name', unrooted=True)
results.add(mono)
t.set_outgroup(x)
self.assertEqual(list(results), [True])
#print 'Testing get_monophyly'
t = Tree("((((((4, e), i)M1, o),h), u), ((3, 4), (i, june))M2);", format=1)
# we annotate the tree using external data
colors = {"a":"red", "e":"green", "i":"yellow",
"o":"black", "u":"purple", "4":"green",
"3":"yellow", "1":"white", "5":"red",
"june":"yellow"}
for leaf in t:
leaf.add_features(color=colors.get(leaf.name, "none"))
green_yellow_nodes = set([t&"M1", t&"M2"])
mono_nodes = t.get_monophyletic(values=["green", "yellow"], target_attr="color")
self.assertEqual(set(mono_nodes), green_yellow_nodes)
def test_copy(self):
t = Tree("((A, B)Internal_1:0.7, (C, D)Internal_2:0.5)root:1.3;", format=1)
# we add a custom annotation to the node named A
(t & "A").add_features(label="custom Value")
# we add a complex feature to the A node, consisting of a list of lists
(t & "A").add_features(complex=[[0,1], [2,3], [1,11], [1,0]])
t_nw = t.copy("newick")
t_nwx = t.copy("newick-extended")
t_pkl = t.copy("cpickle")
(t & "A").testfn = lambda: "YES"
t_deep = t.copy("deepcopy")
self.assertEqual((t_nw & "root").name, "root")
self.assertEqual((t_nwx & "A").label, "custom Value")
self.assertEqual((t_pkl & "A").complex[0], [0,1])
self.assertEqual((t_deep & "A").testfn(), "YES")
# def test_traversing_speed(self):
# return
# for x in xrange(10):
# t = Tree()
# t.populate(100000)
# leaves = t.get_leaves()
# sample = random.sample(leaves, 100)
# t1 = time.time()
# a = t.get_common_ancestor_OLD(sample)
# t2 = time.time() - t1
# print "OLD get common", t2
# t1 = time.time()
# b = t.get_common_ancestor(sample)
# t2 = time.time() - t1
# print "NEW get common", t2
# self.assertEqual(a, b)
# t1 = time.time()
# [n for n in t._iter_descendants_postorder_OLD()]
# t2 = time.time() - t1
# print "OLD postorder", t2
# t1 = time.time()
# [n for n in t._iter_descendants_postorder()]
# t2 = time.time() - t1
# print "NEW postorder", t2
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
Akshay0724/scikit-learn | sklearn/utils/tests/test_random.py | 84 | 7349 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from scipy.misc import comb as combinations
from numpy.testing import assert_array_almost_equal
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.random import random_choice_csc
from sklearn.utils.testing import (
assert_raises,
assert_equal,
assert_true)
###############################################################################
# test custom sampling without replacement algorithm
###############################################################################
def test_invalid_sample_without_replacement_algorithm():
assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown")
def test_sample_without_replacement_algorithms():
methods = ("auto", "tracking_selection", "reservoir_sampling", "pool")
for m in methods:
def sample_without_replacement_method(n_population, n_samples,
random_state=None):
return sample_without_replacement(n_population, n_samples,
method=m,
random_state=random_state)
check_edge_case_of_sample_int(sample_without_replacement_method)
check_sample_int(sample_without_replacement_method)
check_sample_int_distribution(sample_without_replacement_method)
def check_edge_case_of_sample_int(sample_without_replacement):
# n_population < n_sample
assert_raises(ValueError, sample_without_replacement, 0, 1)
assert_raises(ValueError, sample_without_replacement, 1, 2)
# n_population == n_samples
assert_equal(sample_without_replacement(0, 0).shape, (0, ))
assert_equal(sample_without_replacement(1, 1).shape, (1, ))
# n_population >= n_samples
assert_equal(sample_without_replacement(5, 0).shape, (0, ))
assert_equal(sample_without_replacement(5, 1).shape, (1, ))
# n_population < 0 or n_samples < 0
assert_raises(ValueError, sample_without_replacement, -1, 5)
assert_raises(ValueError, sample_without_replacement, 5, -1)
def check_sample_int(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# the sample is of the correct length and contains only unique items
n_population = 100
for n_samples in range(n_population + 1):
s = sample_without_replacement(n_population, n_samples)
assert_equal(len(s), n_samples)
unique = np.unique(s)
assert_equal(np.size(unique), n_samples)
assert_true(np.all(unique < n_population))
# test edge case n_population == n_samples == 0
assert_equal(np.size(sample_without_replacement(0, 0)), 0)
def check_sample_int_distribution(sample_without_replacement):
# This test is heavily inspired by test_random.py of python-core.
#
# For the entire allowable range of 0 <= k <= N, validate that
# sample generates all possible permutations
n_population = 10
# a large number of trials prevents false negatives without slowing the normal
# case
n_trials = 10000
for n_samples in range(n_population):
# Counting the number of combinations is not as good as counting the
# the number of permutations. However, it works with sampling algorithm
# that does not provide a random permutation of the subset of integer.
n_expected = combinations(n_population, n_samples, exact=True)
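# For example, with n_population=10 and n_samples=3 there are C(10, 3) = 120
# distinct subsets, so n_expected = 120 for that iteration.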
output = {}
for i in range(n_trials):
output[frozenset(sample_without_replacement(n_population,
n_samples))] = None
if len(output) == n_expected:
break
else:
raise AssertionError(
"number of combinations != number of expected (%s != %s)" %
(len(output), n_expected))
def test_random_choice_csc(n_samples=10000, random_state=24):
# Explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Implicit class probabilities
classes = [[0, 1], [1, 2]] # test for array-like support
class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples)
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# Edge case probabilities 1.0 and 0.0
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])]
got = random_choice_csc(n_samples, classes, class_probabilites,
random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel(),
minlength=len(class_probabilites[k])) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
# One class target data
classes = [[1], [0]] # test for array-like support
class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])]
got = random_choice_csc(n_samples=n_samples,
classes=classes,
random_state=random_state)
assert_true(sp.issparse(got))
for k in range(len(classes)):
p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples
assert_array_almost_equal(class_probabilites[k], p, decimal=1)
def test_random_choice_csc_errors():
# the length of an array in classes and class_probabilites is mismatched
classes = [np.array([0, 1]), np.array([0, 1, 2, 3])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# the class dtype is not supported
classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])]
class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
# Given probabilities don't sum to 1
classes = [np.array([0, 1]), np.array([0, 1, 2])]
class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])]
assert_raises(ValueError, random_choice_csc, 4, classes,
class_probabilites, 1)
| bsd-3-clause |
Akshay0724/scikit-learn | examples/mixture/plot_gmm.py | 112 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts its number
of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
Laurawly/tvm-1 | python/tvm/relay/frontend/qnn_torch.py | 1 | 37509 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel
""" Functions to convert quantized torch models to QNN """
import numpy as np
import tvm
from tvm import relay
from tvm.relay import expr as _expr
from tvm.relay import op as _op
from tvm.relay.frontend.common import infer_shape
from .common import logger
from .pytorch_utils import is_version_greater_than
class QNNParam:
"""A placeholder for weight quantization parameters"""
def __init__(self, weight, bias, scale, zero_point):
self.weight = weight
if bias is not None:
self.bias = bias.detach().numpy()
else:
self.bias = None
self.scale = _expr.const(scale)
self.zero_point = _expr.const(zero_point, dtype="int32")
class ConvPackedParam(QNNParam):
"""A placeholder for quantized conv2d op attributes
As of PyTorch 1.6, attributes of quantized conv2d ops, like
stride, padding etc are stored in ConvPackedParams objects,
together with weights and quantization parameters
"""
def __init__(
self,
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
):
super().__init__(weight_np, bias, scale, zero_point)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
# Used only for conv_transpose2d
self.output_padding = output_padding
def _get_quant_params(qweight):
import torch
weight_np = qweight.dequantize().numpy()
if qweight.qscheme() == torch.per_tensor_affine:
return weight_np, qweight.q_scale(), int(qweight.q_zero_point())
scales = qweight.q_per_channel_scales().numpy()
zero_points = qweight.q_per_channel_zero_points().numpy()
# This is an assumption posed by QNN
msg = "The values of zero points should be all zero for per channel"
assert np.all(zero_points == 0), msg
return weight_np, scales, 0
def make_qnn_param(qweight, bias):
weight_np, scale, zero_point = _get_quant_params(qweight)
return QNNParam(weight_np, bias, scale, zero_point)
def make_conv_packed_param(qweight, bias, packed_params):
weight_np, scale, zero_point = _get_quant_params(qweight)
stride = packed_params.stride()
padding = packed_params.padding()
dilation = packed_params.dilation()
groups = packed_params.groups()
output_padding = packed_params.output_padding()
return ConvPackedParam(
weight_np,
bias,
scale,
zero_point,
stride,
padding,
dilation,
groups,
output_padding,
)
def get_weight_quant_params(script_module, packed_param_names):
"""Retrive and unpack weight parameters from quantized modules"""
import torch
param_name = "_packed_params"
quant_params = {}
def filter_func(named_module):
m = named_module[1]
return isinstance(m, torch.jit.RecursiveScriptModule) and (
("Conv" in m.original_name) or (m.original_name == "LinearPackedParams")
)
for name, m in filter(filter_func, script_module.named_modules()):
key = name + "." + param_name
state_dict = m.state_dict()
if key not in packed_param_names:
continue
if len(state_dict) == 0 and not hasattr(m, param_name):
# for v1.6 and above
# This case seems to happen if a model is serialized
# and loaded back
# This module can be safely ignored
continue
if len(state_dict) == 0 and hasattr(m, param_name):
# for v1.6 and above
packed_params = m._packed_params
else:
assert len(state_dict) == 1
packed_params = list(state_dict.values())[0]
if "Conv" in m.original_name and len(state_dict) == 0:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_conv_packed_param(qweight, bias, packed_params)
elif "Conv" in m.original_name:
qweight, bias = torch.ops.quantized.conv2d_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
elif m.original_name == "LinearPackedParams":
qweight, bias = torch.ops.quantized.linear_unpack(packed_params)
quant_params[key] = make_qnn_param(qweight, bias)
return quant_params
def quantize_numpy(weight, scale, zero_point, out_dtype_np):
iinfo = np.iinfo(out_dtype_np)
clip_min = iinfo.min
clip_max = iinfo.max
if len(scale.shape) > 0:
scale = np.reshape(scale, [weight.shape[0]] + [1] * (len(weight.shape) - 1))
transformed = zero_point + weight / scale
return np.clip(np.round(transformed), clip_min, clip_max).astype(out_dtype_np)
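# Illustrative example of the affine quantization above (numbers assumed):
# with scale=0.1, zero_point=0 and out_dtype_np=np.int8, a weight of 1.27
# becomes round(0 + 1.27 / 0.1) = 13, and anything mapping outside
# [-128, 127] is clipped to that range.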
def add_quant_params_to_outputs(
outputs, packed_param_map, quant_params, input_scales_for_bias, keep_quantized_weight=False
):
"""
Add quant params to outputs so that they can be referenced by other
ops later. Weights are quantized here.
"""
for node_name, packed_param_name in packed_param_map.items():
qparam = quant_params[packed_param_name]
weight_scale = _get_numpy(qparam.scale)
param_prefix = packed_param_name[: -len("._packed_params")]
if keep_quantized_weight:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="int8"
)
qparam.weight = quantize_numpy(
qparam.weight, weight_scale, _get_numpy(qparam.zero_point), np.int8
)
qweight = qparam.weight_var
else:
qparam.weight_var = _expr.var(
param_prefix + "_weight", shape=qparam.weight.shape, dtype="float32"
)
qweight = relay.qnn.op.quantize(
qparam.weight_var, qparam.scale, qparam.zero_point, out_dtype="int8", axis=0
)
if qparam.bias is not None:
float_bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="float32"
)
if node_name not in input_scales_for_bias:
# This case is for dynamic quantization, where the input activation scale is
# unknown until runtime.
qparam.bias_var = float_bias_var
qbias = qparam.bias_var
elif keep_quantized_weight:
qparam.bias_var = _expr.var(
param_prefix + "_bias", shape=qparam.bias.shape, dtype="int32"
)
qparam.bias = quantize_numpy(
qparam.bias, input_scales_for_bias[node_name] * weight_scale, 0, np.int32
)
qbias = qparam.bias_var
else:
qparam.bias_var = float_bias_var
qbias = relay.qnn.op.quantize(
qparam.bias_var,
_expr.const(input_scales_for_bias[node_name] * weight_scale),
_expr.const(0, "int32"),
out_dtype="int32",
axis=0,
)
else:
qbias = None
quant_params[packed_param_name] = qparam
params = [qweight, qparam.scale, qparam.zero_point, qbias]
if isinstance(quant_params[packed_param_name], ConvPackedParam):
params += [
qparam.stride,
qparam.padding,
qparam.dilation,
qparam.groups,
qparam.output_padding,
]
outputs[node_name] = params
def _get_quant_param_for_input(input_value):
"""
We want to know the input scale and zp of this input_value, since
input quant params are not explicitly passed around in torch (they
are embedded in a QTensor data structure, not visible statically).
We know that it is quantized using output scale and zp
of some previous quantized op. The purpose of this function
is to find that pair of parameters.
"""
# Indices for output scale and zp
# For example, in quantized::conv2d(%input, %1, %2, %3, %4, %5, %6, %7),
# 6th and 7th arg are output scale and zp respectively.
# PyTorch 1.6 changed qconv API
if is_version_greater_than("1.5.1"):
qconv_indices = (2, 3)
else:
qconv_indices = (6, 7)
output_quant_param_indices = {
"aten::quantize_per_tensor": (1, 2),
"quantized::conv2d": qconv_indices,
"quantized::conv2d_relu": qconv_indices,
"quantized::linear": (2, 3),
"quantized::linear_relu": (2, 3),
"quantized::add_relu": (2, 3),
"quantized::add": (2, 3),
"quantized::mul_relu": (2, 3),
"quantized::mul": (2, 3),
"quantized::cat": (2, 3),
"quantized::mul_scalar": (2, 3),
"quantized::add_scalar": (2, 3),
"quantized::hardswish": (1, 2),
"quantized::conv_transpose2d": qconv_indices,
}
def dfs(current_node):
# trace back to find the producer of this input value
current_op = current_node.kind()
if current_op in output_quant_param_indices:
indices = output_quant_param_indices[current_op]
scale = current_node.inputsAt(indices[0])
zp = current_node.inputsAt(indices[1])
return scale, zp
# Trace back earlier nodes, dfs order
# Assume quantized tensor comes earlier in the args
for arg in current_node.inputs():
return dfs(arg.node())
# shouldn't happen
assert False, "No producer for %s" % (str(current_node))
return dfs(input_value.node())
def _get_add_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::add_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
s = input_scale
z = input_zero_point
c = scalar
c_q = round(c / s)
if q_min > z - c_q:
s_prime = (float(q_max) - (z - c_q)) / (float(q_max) - q_min) * s
z_prime = q_min
elif q_max < z - c_q:
s_prime = (float(z - c_q) - q_min) / (float(q_max) - q_min) * s
z_prime = q_max
else:
s_prime = s
z_prime = z - c_q
return s_prime, z_prime
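# Worked example with assumed inputs: for input_scale s=0.1,
# input_zero_point z=10 and scalar c=3.0, c_q = round(3.0 / 0.1) = 30, so
# z - c_q = -20 < q_min and the first branch applies:
# s_prime = (255 - (-20)) / 255 * 0.1 ~= 0.1078 and z_prime = 0.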
def _get_mul_scalar_output_quant_param(input_scale, input_zero_point, scalar):
"""
Determine the output scale and zp of quantized::mul_scalar op
This is used for mobilenet v3
Refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
The names of variables are the same as torch impl
"""
q_min = 0
q_max = 255
self_scale = input_scale
self_zero_point = input_zero_point
other_val = scalar
if other_val > 0.0:
s_prime = other_val * self_scale
z_prime = self_zero_point
elif other_val == 0.0:
s_prime = 1.0
z_prime = 0
else:
s_prime = abs(other_val) * self_scale
z_prime = q_max - (self_zero_point - q_min)
return s_prime, z_prime
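# Worked example with assumed inputs: for input_scale=0.1,
# input_zero_point=10 and a negative scalar -2.0, the last branch gives
# s_prime = abs(-2.0) * 0.1 = 0.2 and z_prime = 255 - (10 - 0) = 245.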
def _add_output_quant_params_to_scalar_op(node, graph, input_scale, input_zero_point, scalar):
"""
The output scale and zp of {add,mul}_scalar op are not explicit in the IR
They are required for _get_quant_param_for_input above to work correctly
So calculate these params using the same way torch does, and make new
constant nodes in the input IR. Also add these params to the inputs of
scalar op.
For example,
%6 : float = prim::Constant[value=3.]()
%input : QUInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6)
becomes
%6 : float = prim::Constant[value=3.]()
%7 : float = prim::Constant[value=0.015686161816120148]()
%8 : int = prim::Constant[value=0]()
%input : UInt8(1, 3, 224, 224) = quantized::add_scalar(%x.1, %6, %7, %8)
%7 and %8 are newly created output scale and zp constant nodes
"""
# pylint: disable=c-extension-no-member
import torch
operator = node.kind()
if operator == "quantized::mul_scalar":
out_scale, out_zero_point = _get_mul_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
elif operator == "quantized::add_scalar":
out_scale, out_zero_point = _get_add_scalar_output_quant_param(
input_scale, input_zero_point, scalar
)
else:
raise NotImplementedError("unsupported scalar op: %s" % operator)
# create new constant nodes and add them to graph
out_scale_node = graph.create("prim::Constant")
out_zero_point_node = graph.create("prim::Constant")
out_scale_node.insertBefore(node)
out_zero_point_node.insertBefore(node)
out_scale_node.f_("value", out_scale)
out_zero_point_node.i_("value", out_zero_point)
out_scale_node.output().setType(torch._C.FloatType.get())
out_zero_point_node.output().setType(torch._C.IntType.get())
node.addInput(out_scale_node.output())
node.addInput(out_zero_point_node.output())
def add_input_quant_params_to_op_inputs(graph):
"""
In Torch, input quant params are not explicitly passed around
Instead, they are stored in QTensor data structure, and retrieved
at runtime by each quantized ops.
However, they need to be known statically for QNN translation.
To workaround and simplify the translation of inputs, we manually add
input quant params to inputs of Torch quantized operators listed below.
See _quantized_conv2d() below for example of why this is helpful.
For example,
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435)
becomes
%395 : float = prim::Constant[value=0.036212071776390076]()
%396 : int = prim::Constant[value=0]()
%430 : float = prim::Constant[value=0.16080744564533234]()
%431 : int = prim::Constant[value=42]()
%input : QUInt8(1, 512, 7, 7) = quantized::add(%x.8, %x.9, %434, %435,
%430, %431, %395, %396)
%434, %435 are output scale and zp of quantized::add op
%430, %431, %395, %396 are two pairs of input (scale, zp) for two tensors
added by this function
"""
# How many quantized tensors does each op take as inputs?
# A pair of (scale, zp) for each input quantized tensor will be added
# to the input nodes
num_quantized_inputs = {
"quantized::conv2d": 1,
"quantized::conv2d_relu": 1,
"quantized::linear": 1,
"quantized::linear_relu": 1,
"quantized::add_relu": 2,
"quantized::add": 2,
"quantized::mul_relu": 2,
"quantized::mul": 2,
"aten::dequantize": 1,
"aten::mean": 1,
"aten::upsample_nearest2d": 1,
"aten::upsample_bilinear2d": 1,
"aten::relu_": 1,
"aten::relu": 1,
"quantized::add_scalar": 1,
"quantized::mul_scalar": 1,
"quantized::relu6": 1,
"quantized::hardswish": 1,
"aten::hardsigmoid": 1,
"quantized::conv_transpose2d": 1,
}
need_input_quant_param = set(num_quantized_inputs.keys())
need_input_quant_param.add("quantized::cat")
input_scales_for_bias = {}
for node in graph.nodes():
operator = node.kind()
if operator not in need_input_quant_param:
continue
input_scales = []
input_zero_points = []
if operator == "quantized::cat":
# the number of inputs to concat is not constant
# so handle it separately
inputs = node.inputsAt(0).node().inputs()
for inp in inputs:
scale, zp = _get_quant_param_for_input(inp)
input_scales.append(scale)
input_zero_points.append(zp)
else:
for i in range(num_quantized_inputs[operator]):
scale, zp = _get_quant_param_for_input(node.inputsAt(i))
input_scales.append(scale)
input_zero_points.append(zp)
if operator in ["quantized::add_scalar", "quantized::mul_scalar"]:
scalar = node.inputsAt(1).node().f("value")
inp_scale = input_scales[0].node().f("value")
inp_zero_point = input_zero_points[0].node().i("value")
# see the comments in this function above
_add_output_quant_params_to_scalar_op(node, graph, inp_scale, inp_zero_point, scalar)
for scale, zp in zip(input_scales, input_zero_points):
node.addInput(scale)
node.addInput(zp)
if "conv" in operator or "linear" in operator:
# This is required for quantizing the bias
input_scales_for_bias[node.inputsAt(1).debugName()] = scale.node().f("value")
return input_scales_for_bias
def add_quant_params(params, quant_params):
"""Add quant parameters to TVM param map"""
for qparam in quant_params.values():
params[qparam.weight_var.name_hint] = tvm.nd.array(qparam.weight)
if qparam.bias is not None:
params[qparam.bias_var.name_hint] = tvm.nd.array(qparam.bias)
def apply_with_upcast(data, func):
inp = _op.cast(data, dtype="int32")
out = func(inp)
return _op.cast(out, "uint8")
def quantized_mean(data, input_scale, input_zero_point, func_fp32):
# refer to aten/src/ATen/native/quantized/cpu/qreduction.cpp
dequantized = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
out = func_fp32(dequantized)
return relay.qnn.op.quantize(out, input_scale, input_zero_point, out_dtype="uint8", axis=1)
def quantized_upsample(data, input_scale, input_zero_point, func_fp32):
# currently piggy-backs to fp32; it gets identical output to torch
data = relay.qnn.op.dequantize(data, input_scale, input_zero_point)
out = func_fp32(data)
return relay.qnn.op.quantize(out, input_scale, input_zero_point, out_dtype="uint8", axis=1)
def quantized_relu(data, input_zero_point):
# refer to aten/src/ATen/native/quantized/cpu/qrelu.cpp
zp = _op.cast(input_zero_point, dtype="uint8")
return _op.tensor.maximum(data, zp)
def _quantize_per_tensor():
def _impl(inputs, _):
return relay.qnn.op.quantize(
inputs[0], _expr.const(inputs[1]), _expr.const(inputs[2]), out_dtype="uint8", axis=1
)
return _impl
def _dequantize():
def _impl(inputs, _):
assert len(inputs) == 3, "Input quant params not found in op inputs"
inp_scale = _expr.const(inputs[1])
inp_zero_point = _expr.const(inputs[2])
return relay.qnn.op.dequantize(inputs[0], inp_scale, inp_zero_point)
return _impl
def _get_numpy(relay_const_scalar):
return relay_const_scalar.data.numpy()
def _get_scalar(relay_const_scalar):
return _get_numpy(relay_const_scalar).item(0)
def _do_bias_and_requantize(
output, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
):
"""Output processing for conv and linear"""
# this is a vector for per channel case
requant_input_scale = _expr.const(_get_numpy(input_scale) * _get_numpy(weight_scale))
# Torch does bias add and requantize scale in fp32
# refer to third_party/fbgemm/include/fbgemm/OutputProcessing-inl.h
# Instead, we do bias add in int32 and use qnn requantize, which needs
# integer input.
# We observed no loss in accuracy doing it this way, and it is better
# for tvm because bias quantization can be done at compile time
# By contrast, the torch way requires rounding of activations at runtime
if bias is not None:
requantize_input = _op.nn.bias_add(output, bias)
else:
requantize_input = output
requantized = relay.qnn.op.requantize(
requantize_input,
requant_input_scale,
relay.const(0, "int32"),
output_scale,
output_zero_point,
out_dtype="int32",
axis=1,
)
clip_min = 0
if with_relu:
clip_min = _get_scalar(output_zero_point)
clip = _op.tensor.clip(requantized, clip_min, 255.0)
return _op.cast(clip, dtype="uint8")
def _quantized_conv2d(with_relu=False):
def _impl(inputs, _):
# refer to src/ATen/native/quantized/cpu/qconv.cpp
# inputs[0]: input tensor
# inputs[1]: (weight, scale, zero_point, bias)
# inputs[2-5]: stride, padding, dilation, groups
# inputs[6]: output_scale
# inputs[7]: output_zero_point
# inputs[8]: input_scale (added manually by frontend)
# inputs[9]: input_zero_point (added manually by frontend)
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
if len(conv_params) > 4:
# Torch 1.6 or newer case
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
else:
strides = inputs[2]
padding = inputs[3]
dilation = inputs[4]
groups = inputs[5]
output_scale = _expr.const(inputs[6])
output_zero_point = _expr.const(inputs[7])
assert len(inputs) == 10, "Input quant params not found in op inputs"
input_scale = _expr.const(inputs[8])
input_zero_point = _expr.const(inputs[9])
weight_shape = infer_shape(weight)
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[0]
if padding[0] != 0 or padding[1] != 0:
pad_val = _get_scalar(input_zero_point)
inp = _op.nn.pad(
inputs[0],
pad_width=((0, 0), (0, 0), (padding[0], padding[0]), (padding[1], padding[1])),
pad_value=float(pad_val),
)
else:
inp = inputs[0]
# padding is (0, 0) because we did explicit pad op with
# pad value being zero point above
conv_out = relay.qnn.op.conv2d(
inp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=(0, 0),
groups=groups,
channels=out_channels,
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _linear(with_relu=False):
# similar to conv
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = infer_shape(weight)
dense = relay.qnn.op.dense(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=weight_shape[0],
)
bias_var = inputs[1][3]
return _do_bias_and_requantize(
dense, bias_var, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
def _binop(relay_op, with_relu=False, fp32_piggy_back=False):
def qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
qnn_out = relay_op(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
if with_relu:
clip_min = _get_scalar(output_zero_point)
return _op.tensor.clip(qnn_out, clip_min, 255)
return qnn_out
# refer to aten/src/ATen/native/quantized/cpu/{qadd, qmul}.cpp
# they piggy-back to fp32 math by dequantize -> fp32 math -> quantize
def torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
):
if isinstance(lhs, _expr.Call) and lhs.op.name == "qnn.quantize":
lhs = lhs.args[0]
else:
lhs = relay.qnn.op.dequantize(lhs, input_scale_lhs, input_zero_point_lhs)
if isinstance(rhs, _expr.Call) and rhs.op.name == "qnn.quantize":
rhs = rhs.args[0]
else:
rhs = relay.qnn.op.dequantize(rhs, input_scale_rhs, input_zero_point_rhs)
fp32_out = relay_op(lhs, rhs)
if with_relu:
fp32_out = _op.nn.relu(fp32_out)
return relay.qnn.op.quantize(
fp32_out, output_scale, output_zero_point, axis=-1, out_dtype="uint8"
)
def _impl(inputs, _):
lhs = inputs[0]
rhs = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 8, "Input quant params not found in op inputs"
# Manually added by add_input_quant_params_to_op_inputs above
input_scale_lhs = _expr.const(inputs[4])
input_zero_point_lhs = _expr.const(inputs[5])
input_scale_rhs = _expr.const(inputs[6])
input_zero_point_rhs = _expr.const(inputs[7])
if fp32_piggy_back:
logger.info("Piggy backing to FP32 op (PyTorch way)")
return torch_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return qnn_impl(
lhs,
rhs,
input_scale_lhs,
input_zero_point_lhs,
input_scale_rhs,
input_zero_point_rhs,
output_scale,
output_zero_point,
)
return _impl
def _cat(fp32_piggy_back=False):
# refer to aten/src/ATen/native/quantized/cpu/qconcat.cpp
# for concat they also piggy-back to fp32(!)
# dequantize -> fp32 math -> quantize
def torch_impl(inputs, input_scales, input_zero_points, output_scale, output_zero_point, axis):
dequantized = []
for inp, inp_scale, inp_zp in zip(inputs, input_scales, input_zero_points):
dequantized.append(relay.qnn.op.dequantize(inp, inp_scale, inp_zp))
concat = _op.tensor.concatenate(dequantized, axis=axis)
return relay.qnn.op.quantize(
concat, output_scale, output_zero_point, axis=axis, out_dtype="uint8"
)
def _impl(inputs, _):
axis = inputs[1]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
num_inputs = (len(inputs) - 4) // 2
input_scales = []
input_zero_points = []
for i in range(0, num_inputs):
input_scales.append(_expr.const(inputs[4 + i * 2]))
input_zero_points.append(_expr.const(inputs[4 + i * 2 + 1]))
if fp32_piggy_back:
return torch_impl(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return relay.qnn.op.concatenate(
inputs[0], input_scales, input_zero_points, output_scale, output_zero_point, axis
)
return _impl
def _add_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qadd.cpp
assert len(inputs) == 6, "Input quant params not found in op inputs"
s = inputs[4]
z = inputs[5]
c = inputs[1]
c_q = round(c / s)
q_min = 0
q_max = 255
# math for calculating output scale and zp are already done
# during _add_output_quant_params_to_scalar_op above
out_scale = _expr.const(inputs[2])
out_zp = _expr.const(inputs[3])
if q_min > z - c_q or q_max < z - c_q:
# TODO(masahi): Replace this with integer only compute
dequant = relay.qnn.op.dequantize(inputs[0], _expr.const(s), _expr.const(z))
dequantized_add = _op.tensor.add(dequant, _expr.const(c_q * s))
return relay.qnn.op.quantize(
dequantized_add, out_scale, out_zp, axis=1, out_dtype="uint8"
)
# only scale change
return inputs[0]
return _impl
def quantize_scalar(data, scale, zero_point):
# used to quantize 6.0 in mobilenet v3
transformed = zero_point + data / scale
return max(0, min(round(transformed), 255))
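# For example (values assumed for illustration), quantize_scalar(6.0, 0.047, 0)
# returns max(0, min(round(0 + 6.0 / 0.047), 255)) = 128.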
def _relu6():
# refer to src/ATen/native/quantized/cpu/qrelu.cpp
def _impl(inputs, _):
assert len(inputs) == 4, "Input quant params not found in op inputs"
input_scale = inputs[2]
input_zero_point = inputs[3]
six = quantize_scalar(6.0, input_scale, input_zero_point)
return _op.tensor.clip(inputs[0], input_zero_point, six)
return _impl
def _mul_scalar():
# this is used for mobilenet v3
def _impl(inputs, _):
# refer to aten/src/ATen/native/quantized/cpu/qmul.cpp
# math for calculating output scale and zp are already done
# during _add_output_quant_params_to_scalar_op above
assert len(inputs) == 6, "Input quant params not found in op inputs"
other_val = inputs[1] # scalar
if other_val > 0.0:
# only scale change
return inputs[0]
if other_val == 0.0:
shape = infer_shape(inputs[0])
return _op.full(_expr.const(0), shape, dtype="uint8")
# negative scale case
q_min = 0
q_max = 255
bias = _expr.const(q_max + q_min, dtype="int8")
int8 = bias - _op.cast(inputs[0], "int8")
return _op.cast(int8, "uint8")
return _impl
def _hswish():
# refer to src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
# They fallback to fp32
def _impl(inputs, _):
assert len(inputs) == 5, "Input quant params not found in op inputs"
# TODO(masahi): Replace this with integer only compute.
# We do not have to strictly follow how PyTorch does it.
def relu6(x):
return _op.tensor.clip(x, 0.0, 6.0)
def hardsigmoid(x):
dtype = "float32"
return relu6(x + _expr.const(3.0, dtype=dtype)) / _expr.const(6.0, dtype=dtype)
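# The quantized op falls back to fp32 hardswish(x) = x * hardsigmoid(x),
# with hardsigmoid(x) = clip(x + 3, 0, 6) / 6 as defined above.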
output_scale = _expr.const(inputs[1])
output_zero_point = _expr.const(inputs[2])
input_scale = _expr.const(inputs[3])
input_zero_point = _expr.const(inputs[4])
dequant = relay.qnn.op.dequantize(inputs[0], input_scale, input_zero_point, axis=1)
dequantized_hswish = dequant * hardsigmoid(dequant)
return relay.qnn.op.quantize(
dequantized_hswish, output_scale, output_zero_point, out_dtype="uint8"
)
return _impl
def _linear_dynamic():
def _calculate_qparam(inp):
# reference ATen/native/quantized/cpu/qlinear_dynamic.cpp
# ChooseQuantizationParams function
mn = _op.min(inp)
mx = _op.max(inp)
# Ensure that the interval contains 0
mn = _op.minimum(mn, _op.const(0.0, dtype="float32"))
mx = _op.maximum(mx, _op.const(0.0, dtype="float32"))
qmax = 255
# reduce_range became True in v1.6
if is_version_greater_than("1.5.1"):
qmax = 127
scale = (mx - mn) / _expr.const(qmax, dtype="float32")
zero_point_from_min = -(mn / scale)
zero_point = _op.cast(_op.round(_op.clip(zero_point_from_min, 0.0, qmax)), "int32")
return scale, zero_point
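# Illustrative example (assuming qmax=255, i.e. torch < 1.6): for activations
# spanning [-1.0, 3.0], scale = (3.0 - (-1.0)) / 255 ~= 0.0157 and
# zero_point = round(clip(1.0 / 0.0157, 0, 255)) = 64.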
def _impl(inputs, _):
weight = inputs[1][0]
weight_scale = inputs[1][1]
weight_zero_point = inputs[1][2]
inp = inputs[0]
input_scale, input_zero_point = _calculate_qparam(inp)
qinp = relay.qnn.op.quantize(inp, input_scale, input_zero_point, out_dtype="uint8")
data_shape = infer_shape(inp)
if len(data_shape) > 2:
qinp = _op.reverse_reshape(qinp, [-1, 0])
weight_shape = infer_shape(weight)
units = weight_shape[0]
dense = relay.qnn.op.dense(
qinp,
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
units=units,
)
bias_var = inputs[1][3]
dequant_scale = input_scale * weight_scale
dense_out = relay.qnn.op.dequantize(
dense, dequant_scale, input_zero_point=relay.const(0, "int32"), axis=1
)
if len(data_shape) > 2:
new_shape = list(data_shape[:-1])
new_shape.append(units)
dense_out = _op.reshape(dense_out, new_shape)
if bias_var is not None:
return dense_out + bias_var
return dense_out
return _impl
def _quantized_conv_transpose2d(with_relu=False):
def _impl(inputs, _):
# Refer to aten/src/ATen/native/quantized/cpu/qconv.cpp
# Supported in Torch 1.7 or newer
conv_params = inputs[1]
weight = conv_params[0]
weight_scale = conv_params[1]
weight_zero_point = conv_params[2]
bias = conv_params[3]
strides = conv_params[4]
padding = conv_params[5]
dilation = conv_params[6]
groups = conv_params[7]
output_padding = conv_params[8]
output_scale = _expr.const(inputs[2])
output_zero_point = _expr.const(inputs[3])
assert len(inputs) == 6, "Input quant params not found in op inputs"
# These are manually added by add_input_quant_params_to_op_inputs above
# In torch, they are retrieved from QTensor data structure at runtime
input_scale = _expr.const(inputs[4])
input_zero_point = _expr.const(inputs[5])
weight_shape = list(infer_shape(weight))
kernel_size = (weight_shape[2], weight_shape[3])
out_channels = weight_shape[1]
conv_out = relay.qnn.op.conv2d_transpose(
inputs[0],
weight,
input_zero_point,
weight_zero_point,
input_scale,
weight_scale,
kernel_size=kernel_size,
dilation=dilation,
strides=strides,
padding=padding,
groups=groups,
channels=out_channels,
output_padding=output_padding,
out_dtype="int32",
kernel_layout="IOHW",
)
return _do_bias_and_requantize(
conv_out, bias, input_scale, weight_scale, output_scale, output_zero_point, with_relu
)
return _impl
convert_map = {
"aten::quantize_per_tensor": _quantize_per_tensor(),
"quantized::conv2d_relu": _quantized_conv2d(with_relu=True),
"aten::dequantize": _dequantize(),
"quantized::conv2d": _quantized_conv2d(),
"quantized::add_relu": _binop(relay.qnn.op.add, with_relu=True),
"quantized::add": _binop(relay.qnn.op.add),
"quantized::mul_relu": _binop(relay.qnn.op.mul, with_relu=True),
"quantized::mul": _binop(relay.qnn.op.mul),
"quantized::linear": _linear(),
"quantized::linear_relu": _linear(with_relu=True),
"quantized::cat": _cat(),
"quantized::add_scalar": _add_scalar(),
"quantized::mul_scalar": _mul_scalar(),
"quantized::relu6": _relu6(),
"quantized::linear_dynamic": _linear_dynamic(),
"quantized::hardswish": _hswish(),
"quantized::conv_transpose2d": _quantized_conv_transpose2d(),
}
| apache-2.0 |
Akshay0724/scikit-learn | benchmarks/bench_plot_nmf.py | 28 | 15630 | """
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Linn (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import fast_dot, safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purposes only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
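# Equivalent to np.linalg.norm(x.ravel()); the dot-product formulation is
# simply a faster way to compute the same Euclidean/Frobenius norm.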
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow finding a better step size but lead to a longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purposes only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
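# Editorial sketch (not part of the original benchmark): a minimal smoke test
# of the private _PGNMF class above on a tiny non-negative random matrix. The
# shapes, tolerance, and the relative-error check below are illustrative
# choices, not values used anywhere else in this script.
def _demo_pgnmf_on_random_data(n_samples=20, n_features=12, n_components=3):
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(n_samples, n_features))
    model = _PGNMF(n_components=n_components, tol=1e-4, max_iter=200,
                   nls_max_iter=10)
    W = model.fit_transform(X)
    H = model.components_
    # Relative Frobenius reconstruction error; small values mean the
    # projected gradient solver found a reasonable factorization.
    return np.linalg.norm(X - np.dot(W, H)) / np.linalg.norm(X)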
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
    # Use a pandas DataFrame to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
    run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio)
plt.show()
| bsd-3-clause |
QianruZhou333/ASleep | my_importData_2_floor.py | 2 | 1588 | import numpy as np
import pandas as pd
input_file = "2_floor.csv"
# comma delimited is the default
df = pd.read_csv(input_file, header = 0)
# for space delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = " ")
# for tab delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = "\t")
# put the original column names in a python list
original_headers = list(df.columns.values)
# remove the non-numeric columns
df = df._get_numeric_data()
# put the numeric column names in a python list
numeric_headers = list(df.columns.values)
# create a numpy array with the numeric values for input into scikit-learn
numpy_array = df.values  # df.as_matrix() was removed in newer pandas versions
# reverse the order of the columns
#numeric_headers.reverse()
#reverse_df = df[numeric_headers]
# throughput random forest regression
t = numpy_array[0:168, 3]
x = np.linspace(0, 167, 168)
xall = np.linspace(0, 189, 190)
xtest = np.linspace(168, 189, 22)
from sklearn.ensemble import RandomForestRegressor
#tfit = RandomForestRegressor(100).fit(x[:, None], t).predict(x[:, None])
tfit = RandomForestRegressor(100).fit(numpy_array[0:168, 0:2], t).predict(numpy_array[0:190, 0:2])
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.errorbar(x, t, 0.3, fmt='*', label="Training traffic")
ax.plot(xall, tfit, '-r', label="Predicted traffic")
ax.errorbar(xtest, numpy_array[168:190, 3], fmt='og', label="Test traffic")
ax.set_ylabel('Throughput (kbits/second)')
ax.set_xlabel('Time in hours')
ax.set_title('Traffic Prediction with Random Forest Regression on 2nd floor')
ax.legend(loc="upper left")
plt.show()
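# A possible follow-up (editorial sketch, not part of the original script):
# quantify the prediction error on the 22 held-out hours instead of judging
# the plot by eye. The column index 3 and the 168/190 split mirror the slicing
# used above and are assumptions about this particular CSV layout.
from sklearn.metrics import mean_absolute_error
test_true = numpy_array[168:190, 3]
test_pred = tfit[168:190]
print("MAE on held-out hours: %.2f kbits/s"
      % mean_absolute_error(test_true, test_pred))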
| apache-2.0 |
Akshay0724/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 346 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
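# Editorial sketch (not part of the original example): a rough quantitative
# check of the claim in the docstring. ICA recovers sources only up to
# permutation and sign, so we take, for each true source, the largest absolute
# correlation with any recovered component; values near 1 indicate good
# recovery, and the PCA row is expected to be noticeably worse.
corr_ica = np.abs(np.corrcoef(S.T, S_.T))[:3, 3:]
corr_pca = np.abs(np.corrcoef(S.T, H.T))[:3, 3:]
print("best |corr| per source, ICA:", corr_ica.max(axis=1))
print("best |corr| per source, PCA:", corr_pca.max(axis=1))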
| bsd-3-clause |
ningchi/scikit-learn | sklearn/metrics/tests/test_ranking.py | 5 | 40934 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
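# Editorial addition: a tiny worked example of the pairwise AUC definition
# above; the helper is underscore-prefixed on purpose so it is not collected
# by the test runner.
def _auc_worked_example():
    """Editorial sketch (not a collected test): a hand-checkable case.
    With y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], the
    positive scores are {0.35, 0.8} and the negative ones {0.1, 0.4}. Three of
    the four positive/negative pairs are ranked correctly (0.35 > 0.4 fails),
    so both implementations should return 3 / 4 = 0.75.
    """
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)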
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
            # Compute precision up to document i,
            # i.e. the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
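# Worked example for the reference implementation above (editorial note): with
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], the descending
# score order is 0.8 (pos), 0.4 (neg), 0.35 (pos), 0.1 (neg). Precision at the
# two relevant documents is 1/1 and 2/3, so this function returns
# (1 + 2/3) / 2 = 0.8333...; the tests below only compare it loosely to the
# curve-based estimate, which can differ slightly.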
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test precision-recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant to
    # scaling or shifting of the probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise a ValueError if the input is not in an appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works without ties:
    # basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account,
        # e.g. two labels tied at rank 1 are both counted as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the relevant labels ranked at least as well as this one
            # (rank[r] <= rank[label], which includes the label itself).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
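# Worked example for the tie correction used in _my_lrap (editorial note): for
# y_score[i] = [0.5, 0.5, 0.25], np.unique yields unique_rank = [0.25, 0.5] and
# inv_rank = [1, 1, 0], so rank = n_ranks - inv_rank = [1, 1, 2]. The cumulative
# bincount gives corr_rank = [0, 2, 3] and corr_rank[rank] = [2, 2, 3]: the two
# tied labels both count as rank 2 and the remaining label as rank 3, which is
# exactly the "rank 1 ex aequo becomes rank 2" behaviour described above.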
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
DistrictDataLabs/yellowbrick | yellowbrick/regressor/residuals.py | 1 | 19569 | # yellowbrick.regressor.residuals
# Visualize the residuals between predicted and actual data for regression problems
#
# Author: Rebecca Bilbro
# Author: Benjamin Bengfort
# Created: Fri Jun 03 10:30:36 2016 -0700
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: residuals.py [7d3f5e6] [email protected] $
"""
Visualize the residuals between predicted and actual data for regression problems
"""
##########################################################################
## Imports
##########################################################################
import matplotlib.pyplot as plt
from scipy.stats import probplot
try:
# Only available in Matplotlib >= 2.0.2
from mpl_toolkits.axes_grid1 import make_axes_locatable
except ImportError:
make_axes_locatable = None
from yellowbrick.draw import manual_legend
from yellowbrick.utils.decorators import memoized
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.regressor.base import RegressionScoreVisualizer
## Packages for export
__all__ = ["ResidualsPlot", "residuals_plot"]
##########################################################################
## Residuals Plots
##########################################################################
class ResidualsPlot(RegressionScoreVisualizer):
"""
A residual plot shows the residuals on the vertical axis and the
independent variable on the horizontal axis.
If the points are randomly dispersed around the horizontal axis, a linear
regression model is appropriate for the data; otherwise, a non-linear
model is more appropriate.
Parameters
----------
estimator : a Scikit-Learn regressor
Should be an instance of a regressor, otherwise will raise a
YellowbrickTypeError exception on instantiation.
If the estimator is not fitted, it is fit when the visualizer is fitted,
unless otherwise specified by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
hist : {True, False, None, 'density', 'frequency'}, default: True
Draw a histogram showing the distribution of the residuals on the
right side of the figure. Requires Matplotlib >= 2.0.2.
If set to 'density', the probability density function will be plotted.
If set to True or 'frequency' then the frequency will be plotted.
qqplot : {True, False}, default: False
Draw a Q-Q plot on the right side of the figure, comparing the quantiles
of the residuals against quantiles of a standard normal distribution.
        The Q-Q plot and the histogram of residuals cannot be plotted
        simultaneously; either `hist` or `qqplot` has to be set to False.
train_color : color, default: 'b'
        Residuals for training data are plotted with this color, with the
        ``train_alpha`` transparency applied so that the test data residuals
        remain more visible. Can be any matplotlib color.
test_color : color, default: 'g'
Residuals for test data are plotted with this color. In order to
create generalizable models, reserved test data residuals are of
the most analytical interest, so these points are highlighted by
having full opacity. Can be any matplotlib color.
line_color : color, default: dark grey
Defines the color of the zero error line, can be any matplotlib color.
train_alpha : float, default: 0.75
        Specify a transparency for training data, where 1 is completely opaque
and 0 is completely transparent. This property makes densely clustered
points more visible.
test_alpha : float, default: 0.75
Specify a transparency for test data, where 1 is completely opaque
and 0 is completely transparent. This property makes densely clustered
points more visible.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If 'auto' (default), a helper method will check if the estimator
is fitted before fitting it again.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Attributes
----------
train_score_ : float
The R^2 score that specifies the goodness of fit of the underlying
regression model to the training data.
test_score_ : float
The R^2 score that specifies the goodness of fit of the underlying
regression model to the test data.
Examples
--------
>>> from yellowbrick.regressor import ResidualsPlot
>>> from sklearn.linear_model import Ridge
>>> model = ResidualsPlot(Ridge())
>>> model.fit(X_train, y_train)
>>> model.score(X_test, y_test)
>>> model.show()
Notes
-----
ResidualsPlot is a ScoreVisualizer, meaning that it wraps a model and
its primary entry point is the ``score()`` method.
The residuals histogram feature requires matplotlib 2.0.2 or greater.
"""
def __init__(
self,
estimator,
ax=None,
hist=True,
qqplot=False,
train_color="b",
test_color="g",
line_color=LINE_COLOR,
train_alpha=0.75,
test_alpha=0.75,
is_fitted="auto",
**kwargs
):
# Initialize the visualizer base
super(ResidualsPlot, self).__init__(
estimator,
ax=ax,
is_fitted=is_fitted,
**kwargs)
# TODO: allow more scatter plot arguments for train and test points
# See #475 (RE: ScatterPlotMixin)
self.colors = {
"train_point": train_color,
"test_point": test_color,
"line": line_color,
}
self.hist = hist
if self.hist not in {True, "density", "frequency", None, False}:
raise YellowbrickValueError(
"'{}' is an invalid argument for hist, use None, True, "
"False, 'density', or 'frequency'".format(hist)
)
self.qqplot = qqplot
if self.qqplot not in {True, False}:
raise YellowbrickValueError(
"'{}' is an invalid argument for qqplot, use True, "
" or False".format(hist)
)
if self.hist in {True, "density", "frequency"} and self.qqplot in {True}:
raise YellowbrickValueError(
"Set either hist or qqplot to False, can not plot "
"both of them simultaneously."
)
if self.hist in {True, "density", "frequency"}:
self.hax # If hist is True, test the version availability
if self.qqplot in {True}:
self.qqax # If qqplot is True, test the version availability
# Store labels and colors for the legend ordered by call
self._labels, self._colors = [], []
self.alphas = {"train_point": train_alpha, "test_point": test_alpha}
@memoized
def hax(self):
"""
Returns the histogram axes, creating it only on demand.
"""
if make_axes_locatable is None:
raise YellowbrickValueError(
(
"residuals histogram requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set hist=False on the visualizer"
)
)
divider = make_axes_locatable(self.ax)
hax = divider.append_axes("right", size=1, pad=0.1, sharey=self.ax)
hax.yaxis.tick_right()
hax.grid(False, axis="x")
return hax
@memoized
def qqax(self):
"""
Returns the Q-Q plot axes, creating it only on demand.
"""
if make_axes_locatable is None:
raise YellowbrickValueError(
(
"residuals histogram requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set qqplot=False on the visualizer"
)
)
divider = make_axes_locatable(self.ax)
qqax = divider.append_axes("right", size=2, pad=0.25, sharey=self.ax)
qqax.yaxis.tick_right()
return qqax
def fit(self, X, y, **kwargs):
"""
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target values
kwargs: keyword arguments passed to Scikit-Learn API.
Returns
-------
self : ResidualsPlot
The visualizer instance
"""
# fit the underlying model to the data
super(ResidualsPlot, self).fit(X, y, **kwargs)
self.score(X, y, train=True)
return self
def score(self, X, y=None, train=False, **kwargs):
"""
Generates predicted target values using the Scikit-Learn
estimator.
Parameters
----------
X : array-like
            X (also X_test) is the feature matrix of the test set to predict on
y : array-like
            y (also y_test) holds the actual target values to score against
train : boolean
If False, `score` assumes that the residual points being plotted
are from the test data; if True, `score` assumes the residuals
are the train data.
Returns
-------
score : float
The score of the underlying estimator, usually the R-squared score
for regression estimators.
"""
# Do not call super in order to differentiate train and test scores.
score = self.estimator.score(X, y, **kwargs)
if train:
self.train_score_ = score
else:
self.test_score_ = score
y_pred = self.predict(X)
residuals = y_pred - y
self.draw(y_pred, residuals, train=train)
return score
def draw(self, y_pred, residuals, train=False, **kwargs):
"""
Draw the residuals against the predicted value for the specified split.
It is best to draw the training split first, then the test split so
that the test split (usually smaller) is above the training split;
particularly if the histogram is turned on.
Parameters
----------
y_pred : ndarray or Series of length n
An array or series of predicted target values
residuals : ndarray or Series of length n
An array or series of the difference between the predicted and the
target values
train : boolean, default: False
If False, `draw` assumes that the residual points being plotted
are from the test data; if True, `draw` assumes the residuals
are the train data.
Returns
-------
ax : matplotlib Axes
The axis with the plotted figure
"""
if train:
color = self.colors["train_point"]
label = "Train $R^2 = {:0.3f}$".format(self.train_score_)
alpha = self.alphas["train_point"]
else:
color = self.colors["test_point"]
label = "Test $R^2 = {:0.3f}$".format(self.test_score_)
alpha = self.alphas["test_point"]
# Update the legend information
self._labels.append(label)
self._colors.append(color)
# Draw the residuals scatter plot
self.ax.scatter(y_pred, residuals, c=color, alpha=alpha, label=label)
# Add residuals histogram
if self.hist in {True, "frequency"}:
self.hax.hist(residuals, bins=50, orientation="horizontal", color=color)
elif self.hist == "density":
self.hax.hist(
residuals, bins=50, orientation="horizontal", density=True, color=color
)
        # Add Q-Q plot of the residuals
if self.qqplot in {True}:
osm, osr = probplot(residuals, dist="norm", fit=False)
self.qqax.scatter(osm, osr, c=color, alpha=alpha, label=label)
# Ensure the current axes is always the main residuals axes
plt.sca(self.ax)
return self.ax
def finalize(self, **kwargs):
"""
Prepares the plot for rendering by adding a title, legend, and axis labels.
Also draws a line at the zero residuals to show the baseline.
Parameters
----------
kwargs: generic keyword arguments.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Add the title to the plot
self.set_title("Residuals for {} Model".format(self.name))
# Set the legend with full opacity patches using manual legend
manual_legend(self, self._labels, self._colors, loc="best", frameon=True)
# Create a full line across the figure at zero error.
self.ax.axhline(y=0, c=self.colors["line"])
# Set the axes labels
self.ax.set_ylabel("Residuals")
self.ax.set_xlabel("Predicted Value")
# Finalize the histogram axes
if self.hist:
self.hax.axhline(y=0, c=self.colors["line"])
self.hax.set_xlabel("Distribution")
        # Finalize the Q-Q plot axes
if self.qqplot:
self.qqax.set_title("Q-Q plot")
self.qqax.set_xlabel("Theoretical quantiles")
self.qqax.set_ylabel("Observed quantiles")
##########################################################################
## Quick Method
##########################################################################
def residuals_plot(
estimator,
X_train,
y_train,
X_test=None,
y_test=None,
ax=None,
hist=True,
qqplot=False,
train_color="b",
test_color="g",
line_color=LINE_COLOR,
train_alpha=0.75,
test_alpha=0.75,
is_fitted="auto",
show=True,
**kwargs
):
"""ResidualsPlot quick method:
A residual plot shows the residuals on the vertical axis and the
independent variable on the horizontal axis.
If the points are randomly dispersed around the horizontal axis, a linear
regression model is appropriate for the data; otherwise, a non-linear
model is more appropriate.
Parameters
----------
estimator : a Scikit-Learn regressor
Should be an instance of a regressor, otherwise will raise a
YellowbrickTypeError exception on instantiation.
If the estimator is not fitted, it is fit when the visualizer is fitted,
unless otherwise specified by ``is_fitted``.
X_train : ndarray or DataFrame of shape n x m
A feature array of n instances with m features the model is trained on.
Used to fit the visualizer and also to score the visualizer if test splits are
not directly specified.
y_train : ndarray or Series of length n
An array or series of target or class values. Used to fit the visualizer and
also to score the visualizer if test splits are not specified.
X_test : ndarray or DataFrame of shape n x m, default: None
An optional feature array of n instances with m features that the model
is scored on if specified, using X_train as the training data.
y_test : ndarray or Series of length n, default: None
An optional array or series of target or class values that serve as actual
labels for X_test for scoring purposes.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
hist : {True, False, None, 'density', 'frequency'}, default: True
Draw a histogram showing the distribution of the residuals on the
right side of the figure. Requires Matplotlib >= 2.0.2.
If set to 'density', the probability density function will be plotted.
If set to True or 'frequency' then the frequency will be plotted.
qqplot : {True, False}, default: False
Draw a Q-Q plot on the right side of the figure, comparing the quantiles
of the residuals against quantiles of a standard normal distribution.
Q-Q plot and histogram of residuals can not be plotted simultaneously,
either `hist` or `qqplot` has to be set to False.
train_color : color, default: 'b'
Residuals for training data are plotted with this color but also
given a lower opacity (see ``train_alpha``) so that the test data
residuals are more visible. Can be any matplotlib color.
test_color : color, default: 'g'
Residuals for test data are plotted with this color. In order to
create generalizable models, reserved test data residuals are of
the most analytical interest, so these points are highlighted by
having full opacity. Can be any matplotlib color.
line_color : color, default: dark grey
Defines the color of the zero error line, can be any matplotlib color.
train_alpha : float, default: 0.75
Specify a transparency for training data, where 1 is completely opaque
and 0 is completely transparent. This property makes densely clustered
points more visible.
test_alpha : float, default: 0.75
Specify a transparency for test data, where 1 is completely opaque
and 0 is completely transparent. This property makes densely clustered
points more visible.
is_fitted : bool or str, default='auto'
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If 'auto' (default), a helper method will check if the estimator
is fitted before fitting it again.
show: bool, default: True
If True, calls ``show()``, which in turn calls ``plt.show()``; however, you cannot
call ``plt.savefig`` from this signature, nor ``clear_figure``. If False, simply
calls ``finalize()``.
kwargs : dict
Keyword arguments that are passed to the base class and may influence
the visualization as defined in other Visualizers.
Returns
-------
viz : ResidualsPlot
Returns the fitted ResidualsPlot that created the figure.
"""
# Instantiate the visualizer
viz = ResidualsPlot(
estimator=estimator,
ax=ax,
hist=hist,
qqplot=qqplot,
train_color=train_color,
test_color=test_color,
line_color=line_color,
train_alpha=train_alpha,
test_alpha=test_alpha,
is_fitted=is_fitted,
**kwargs
)
# Fit the visualizer
viz.fit(X_train, y_train)
# Score the visualizer
if X_test is not None and y_test is not None:
viz.score(X_test, y_test)
elif X_test is not None or y_test is not None:
raise YellowbrickValueError(
"both X_test and y_test are required if one is specified"
)
else:
viz.score(X_train, y_train)
# Draw the final visualization
if show:
viz.show()
else:
viz.finalize()
# Return the visualizer
return viz
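# A minimal usage sketch of the quick method above, added for illustration
# only; it assumes scikit-learn's Ridge regressor and diabetes dataset, which
# are not used elsewhere in this module.
if __name__ == "__main__":
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import train_test_split

    X, y = load_diabetes(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Train/test residuals with a histogram of their distribution; pass
    # hist=False, qqplot=True instead for a Q-Q plot (they are mutually
    # exclusive).
    residuals_plot(Ridge(), X_train, y_train, X_test, y_test, hist=True)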
| apache-2.0 |
ningchi/scikit-learn | sklearn/linear_model/tests/test_base.py | 119 | 10082 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
Fireblend/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 213 | 4690 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# reference feature scores, used below to check that the best features are recovered
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
thientu/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 266 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# mark all points after the first n_labeled_points as unlabeled (-1)
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
atavory/ibex | ibex/sklearn/_decomposition.py | 1 | 5282 | from __future__ import absolute_import
import inspect
import pandas as pd
from sklearn import base
from sklearn import decomposition as orig
from .._adapter import frame_ex
from .._utils import set_lowest_level_column_names
def transform(self, base_ret):
if isinstance(base_ret, pd.DataFrame):
set_lowest_level_column_names(
base_ret,
['comp_%i' % i for i in range(len(base_ret.columns))])
return base_ret
def get_transform_doc(
orig,
name,
est,
kwargs,
is_regressor,
is_classifier,
is_transformer,
is_clusterer,
has_dataframe_y):
return r"""
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ibex.sklearn import datasets
>>> from ibex.sklearn.decomposition import PCA as PdPCA
>>> iris = datasets.load_iris()
>>> features = iris['feature_names']
>>> iris = pd.DataFrame(
... np.c_[iris['data'], iris['target']],
... columns=features+['class'])
>>> iris[features]
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
...
>>> PdPCA(n_components=2).fit(iris[features], iris['class']).transform(iris[features])
comp_0 comp_1
0 -2.684207 ...0.326607
1 -2.715391 ...0.169557
2 -2.889820 ...0.137346
3 -2.746437 ...0.311124
4 -2.728593 ...0.333925
...
"""
def fit_transform(self, base_ret):
if isinstance(base_ret, pd.DataFrame):
set_lowest_level_column_names(
base_ret,
['comp_%i' % i for i in range(len(base_ret.columns))])
return base_ret
def get_fit_transform_doc(
orig,
name,
est,
kwargs,
is_regressor,
is_classifier,
is_transformer,
is_clusterer,
has_dataframe_y):
return r"""
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ibex.sklearn import datasets
>>> from ibex.sklearn.decomposition import PCA as PdPCA
>>> iris = datasets.load_iris()
>>> features = iris['feature_names']
>>> iris = pd.DataFrame(
... np.c_[iris['data'], iris['target']],
... columns=features+['class'])
>>> iris[features]
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
...
>>> PdPCA(n_components=2).fit(iris[features], iris['class']).transform(iris[features])
comp_0 comp_1
0 -2.684207 ...0.326607
1 -2.715391 ...0.169557
2 -2.889820 ...0.137346
3 -2.746437 ...0.311124
4 -2.728593 ...0.333925
...
"""
def components_(self, base_ret):
return pd.DataFrame(
base_ret,
index=['comp_%i' % i for i in range(len(base_ret))],
columns=self.x_columns)
def get_components_doc(
orig,
name,
est,
kwargs,
is_regressor,
is_classifier,
is_transformer,
is_clusterer,
has_dataframe_y):
return r"""
Example:
>>> import pandas as pd
>>> import numpy as np
>>> from ibex.sklearn import datasets
>>> from ibex.sklearn.decomposition import PCA as PdPCA
>>> iris = datasets.load_iris()
>>> features = iris['feature_names']
>>> iris = pd.DataFrame(
... np.c_[iris['data'], iris['target']],
... columns=features+['class'])
>>> iris[features]
sepal length (cm) sepal width (cm) petal length (cm) petal width (cm)
0 5.1 3.5 1.4 0.2
1 4.9 3.0 1.4 0.2
2 4.7 3.2 1.3 0.2
3 4.6 3.1 1.5 0.2
4 5.0 3.6 1.4 0.2
...
>>> PdPCA(n_components=2).fit(iris[features], iris['class']).transform(iris[features])
comp_0 comp_1
0 -2.684207 ...0.326607
1 -2.715391 ...0.169557
2 -2.889820 ...0.137346
3 -2.746437 ...0.311124
4 -2.728593 ...0.333925
...
"""
| bsd-3-clause |
jmargeta/scikit-learn | sklearn/neighbors/nearest_centroid.py | 4 | 5895 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..externals.six.moves import xrange
from ..metrics.pairwise import pairwise_distances
from ..utils.validation import check_arrays, atleast2d_or_csr
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Parameters
----------
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
`centroids_` : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf–idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_arrays(X, y, sparse_format="csr")
if sp.issparse(X) and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
classes = np.unique(y)
self.classes_ = classes
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
for i, cur_class in enumerate(classes):
center_mask = y == cur_class
if sp.issparse(X):
center_mask = np.where(center_mask)[0]
self.centroids_[i] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.array(X.mean(axis=0))[0]
# Number of samples in each class.
nk = np.array([np.sum(y == cur_class)
for cur_class in classes])
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = np.array(np.power(X - self.centroids_[y], 2))
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
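# i.e. deviation = sign(deviation) * max(|deviation| - shrink_threshold, 0)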
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation = np.multiply(deviation, signs)
# Now adjust the centroids using the deviation
msd = np.multiply(ms, deviation)
self.centroids_ = np.array([dataset_centroid_ + msd[i]
for i in xrange(n_classes)])
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
X = atleast2d_or_csr(X)
if not hasattr(self, "centroids_"):
raise AttributeError("Model has not been trained yet.")
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
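# Illustrative sketch (not part of the original module): the nearest shrunken
# centroid variant is obtained by passing ``shrink_threshold``. The toy data
# below are made up purely for demonstration.
if __name__ == "__main__":
    X_demo = np.array([[-2., -1.], [-1., -1.], [-3., -2.],
                       [1., 1.], [2., 1.], [3., 2.]])
    y_demo = np.array([0, 0, 0, 1, 1, 1])

    # A positive threshold soft-thresholds the per-feature deviations of each
    # class centroid from the overall centroid, suppressing weak features.
    clf = NearestCentroid(shrink_threshold=0.2)
    clf.fit(X_demo, y_demo)
    print(clf.predict(np.array([[-0.8, -1.]])))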
| bsd-3-clause |
Akshay0724/scikit-learn | examples/covariance/plot_covariance_estimation.py | 97 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is consistent, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
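# Added for clarity (not part of the original example): each ShrunkCovariance
# fit above essentially computes the convex combination
#     Sigma(alpha) = (1 - alpha) * Sigma_hat + alpha * (trace(Sigma_hat) / p) * Id
# of the empirical covariance Sigma_hat with a scaled identity target. A
# minimal sketch of that formula:
def shrunk_covariance_sketch(emp_cov, alpha):
    p = emp_cov.shape[0]
    mu = np.trace(emp_cov) / p
    return (1. - alpha) * emp_cov + alpha * mu * np.eye(p)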
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
thientu/scikit-learn | sklearn/tests/test_pipeline.py | 35 | 15221 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
Fireblend/scikit-learn | examples/mixture/plot_gmm_pdf.py | 282 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
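# in this scikit-learn version GMM.score_samples returns a (log-likelihood,
# responsibilities) tuple; index 0 keeps the per-sample log-likelihoods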
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 47 | 3653 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
ningchi/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 233 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check that the start value is an element of X and also the solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check that the start value is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that the start value is not the solution but is an element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that the step is the identity for a single sample
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test a larger problem and check the exact solution in the 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
    # Check that the computed median solves the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
    # Check that a ConvergenceWarning is emitted when the maximum number of
    # iterations is exceeded
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
    # Check that ordinary Least Squares is thrown off by the outliers
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Check that ordinary Least Squares is thrown off by the outliers
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
    # Check that ordinary Least Squares is thrown off by the outliers
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
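    # For n_subsamples=2 and a very large n_samples the approximate breakdown
    # point tends to 1 - 1/sqrt(2), i.e. roughly 29.3% outliers.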
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
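    # A single subsample cannot determine a line with an intercept, so this
    # should be rejected with a ValueError.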
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
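    # The 1d toy problem has only 50 samples, so requesting 101 subsamples
    # exceeds n_samples and should be rejected with a ValueError.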
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
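    # With fewer samples than features, n_subsamples may not be smaller than
    # n_samples, so requesting 9 of the 10 samples should raise a ValueError.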
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
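    # max_subpopulation caps the number of random subsets considered, which
    # keeps this 10000-sample problem tractable.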
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
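    # With n_subsamples equal to n_samples there is only one subset (the full
    # data set), so Theil-Sen reduces to an ordinary Least Squares fit.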
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
    # Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
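    # Redirect stdout/stderr to devnull so the verbose output does not
    # pollute the test log.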
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
    # Check that ordinary Least Squares is thrown off by the outliers
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
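    # n_jobs=-1 runs the subset estimations on all available CPU cores.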
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |