repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
aestrivex/ielu | ielu/plotting_utils.py | 1 | 6230 |
import os
import numpy as np
import nibabel as nib
from traits.api import HasTraits, Float, Int, Tuple
from traitsui.api import View, Item, CSVListEditor
from .geometry import get_vox2rasxfm, apply_affine, get_std_orientation
from .utils import get_subjects_dir
def force_render( figure=None ):
from mayavi import mlab
figure.scene.render()
mlab.draw(figure=figure)
from pyface.api import GUI
_gui = GUI()
orig_val = _gui.busy
_gui.set_busy(busy=True)
_gui.process_events()
_gui.set_busy(busy=orig_val)
_gui.process_events()
def coronal_slice(elecs, start=None, end=None, outfile=None,
subjects_dir=None,
subject=None, reorient2std=True, dpi=150, size=(200,200),
title=None):
'''
Create an image of a coronal slice that approximates a depth lead
inserted laterally with a roughly constant Y coordinate, and plot the
electrodes from the lead overlaid on the slice in the X and Z
directions.
Parameters
----------
elecs : List( Electrode )
list of electrode objects forming this depth lead
start : Electrode
Electrode object at one end of the depth lead
end : Electrode
Electrode object at the other end of the depth lead
outfile : Str
Filename to save the image to
subjects_dir : Str | None
The freesurfer subjects_dir. If this is None, it is assumed to be the
$SUBJECTS_DIR environment variable. If this folder is not writable,
the program will crash.
subject : Str | None
The freesurfer subject. If this is None, it is assumed to be the
$SUBJECT environment variable.
reorient2std : Bool
Apply a matrix to rotate orig.mgz to the standard MNI orientation,
emulating fslreorient2std. This should almost always be True here.
dpi : Int
Dots per inch of output image
size : Tuple
Specify a 2-tuple to control the image size, default is (200,200)
title : Str
Specify a matplotlib title
'''
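# Hypothetical usage sketch (argument values are illustrative only, not from
# this repo's documentation):
#   fig = coronal_slice(lead_electrodes,
#                       start=lead_electrodes[0], end=lead_electrodes[-1],
#                       subject='subj01', subjects_dir='/path/to/subjects',
#                       outfile='lead_coronal.png', title='Lead A')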
print('creating coronal slice with start electrodes %s' % str(start))
subjdir_subj = get_subjects_dir( subjects_dir=subjects_dir,
subject=subject )
orig = os.path.join(subjdir_subj, 'mri', 'orig.mgz')
x_size, y_size, z_size = nib.load(orig).shape
# vox2ras and ras2vox shouldn't need different procedures for
# determining the dimension ordering: the matrix is inverted beforehand,
# so its dimensions are already consistent, even in the complex 3-way case
vox2ras = get_vox2rasxfm(orig, stem='vox2ras')
ras2vox = np.linalg.inv(vox2ras)
ras2vox[0:3,3] = (x_size/2, y_size/2, z_size/2)
rd, ad, sd = get_std_orientation(ras2vox)
# rd, = np.where(np.abs(ras2vox[:,0]) == np.max(np.abs(ras2vox[:,0])))
# ad, = np.where(np.abs(ras2vox[:,1]) == np.max(np.abs(ras2vox[:,1])))
# sd, = np.where(np.abs(ras2vox[:,2]) == np.max(np.abs(ras2vox[:,2])))
r_size = [x_size, y_size, z_size][rd]
a_size = [x_size, y_size, z_size][ad]
s_size = [x_size, y_size, z_size][sd]
#starty = pd.map_cursor( start.asras(), pd.current_affine, invert=True)[1]
#endy = pd.map_cursor( end.asras(), pd.current_affine, invert=True )[1]
#midy = (starty+endy)/2
#pd.move_cursor(128, midy, 128)
electrodes = np.squeeze([apply_affine([e.asras()], ras2vox)
for e in elecs])
#electrodes = np.array([pd.map_cursor(e.asras(), ras2vox,
# invert=True) for e in elecs])
vol = np.transpose( nib.load(orig).get_data(), (rd, ad, sd) )
if start is not None and end is not None:
start_coord = np.squeeze(apply_affine([start.asras()], ras2vox))
end_coord = np.squeeze(apply_affine([end.asras()], ras2vox))
if start_coord[rd] == end_coord[rd]:
raise ValueError('This lead has no variation in the X axis. It should not be displayed coronally')
slice = np.zeros((s_size, r_size))
m = (start_coord[ad]-end_coord[ad])/(start_coord[rd]-end_coord[rd])
b = start_coord[ad]-m*start_coord[rd]
rnew = np.arange(r_size)
anew = m*rnew+b
alower = np.floor(anew).astype(int)  # integer voxel index below each interpolated position
afrac = np.mod(anew, 1)
try:
for rvox in rnew:
slice[:, rvox] = (vol[rvox, alower[rvox], :] *
(1-afrac[rvox])+vol[rvox, alower[rvox]+1, :] *
afrac[rvox])
except IndexError:
raise ValueError('This lead has minimal variation in the X axis. It should not be displayed coronally')
else:
slice_nr = int(np.round(np.mean(electrodes[:, ad])))
slice = vol[:, slice_nr, :].T
vox2pix = np.zeros((2,4))
vox2pix[0, rd] = 1
vox2pix[1, sd] = 1
ras2pix = np.dot(vox2pix, ras2vox)
pix = np.dot(ras2pix,
np.transpose([np.append(e.asras(), 1) for e in elecs]))
#add data to coronal plane
import pylab as pl
fig = pl.figure()
pl.imshow(slice, cmap='gray')
pl.scatter(pix[0,:], pix[1,:], s=10, c='red', edgecolor='yellow',
linewidths=0.4)
if title is not None:
pl.title(title)
pl.axis('off')
#pl.show()
if outfile is not None:
pl.savefig(outfile, dpi=dpi)
return fig
def sequence_3d_images( figure ):
from mayavi import mlab
views = [lambda:mlab.view( azimuth=0, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=180, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=0, elevation=0, figure=figure ),
lambda:mlab.view( azimuth=90, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=270, elevation=90, figure=figure )]
for view in views:
yield view
def save_opaque_clinical_sequence( savefile, mayavi_figure ):
import pylab as pl
from matplotlib.backends.backend_pdf import PdfPages
from mayavi import mlab
with PdfPages(savefile) as pdf:
for angle in sequence_3d_images( mayavi_figure ):
angle()
force_render( figure=mayavi_figure )
pixmap = mlab.screenshot( figure=mayavi_figure )
mpl_figure = pl.figure()
pl.imshow(pixmap, figure=mpl_figure)
pdf.savefig(mpl_figure)
| gpl-3.0 |
albu5/deepGroup | group-detection/vis_kernel_affinity.py | 1 | 6897 |
"""
Visualize and save group detections
"""
from utils import read_cad_frames, read_cad_annotations, get_interaction_features, add_annotation, custom_interaction_features
from matplotlib import pyplot as plt
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.merge import add
from keras.optimizers import adam
import keras.backend as kb
from keras.models import load_model
import numpy as np
from scipy import io
from utils import get_group_instance
from matplotlib import pyplot as plt
from keras import losses
from sklearn.cluster import AffinityPropagation, DBSCAN
import os
from numpy import genfromtxt, savetxt
def kernel_loss(y_true, y_pred):
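# Our (hedged) reading of this custom Keras loss, not documented in the repo:
# y_true is a binary group-membership vector and y_pred a predicted affinity.
# in_cost rewards a high affinity for at least one true member, ex_cost
# penalises positive affinities assigned to non-members, and the kb.prod(...)
# factors switch each term off when y_true is all ones or all zeros.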
inclusion_dist = kb.max(y_pred - 1 + y_true)
exclusion_dist = kb.max(y_pred - y_true)
exclusion_dist2 = kb.mean(y_pred * (1 - y_true) * kb.cast(y_pred > 0, dtype=kb.floatx()))
# ex_cost = kb.log(exclusion_dist + kb.epsilon()) * (1 - kb.prod(y_true))
# in_cost = -kb.log(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
ex_cost = (exclusion_dist2 + kb.epsilon()) * (1 - kb.prod(y_true))
in_cost = -(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
# return inclusion_dist * kb.sum(y_true)
# return - exclusion_dist * (1 - kb.prod(y_true))
return in_cost + ex_cost
def simple_loss(y_true, y_pred):
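# Assumption (not stated in the repo): slots padded up to the fixed maximum
# group size carry negative predictions, so the (y_pred >= 0) mask keeps only
# real entries in the squared error.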
res_diff = (y_true - y_pred) * kb.cast(y_pred >= 0, dtype=kb.floatx())
return kb.sum(kb.square(res_diff))
'''
======================CONSTANTS==================================================================================
'''
losses.simple_loss = simple_loss
losses.kernel_loss = kernel_loss
if not os.path.exists('res'):
os.makedirs('res')
model_path = './models/cad-kernel-affinity-bottom-max-long-custom-20.h5'
n_max = 20
cad_dir = '../ActivityDataset'
annotations_dir = cad_dir + '/' + 'csvanno-long-feat'
# annotations_dir = cad_dir + '/' + 'csvanno-long-feat'
annotations_dir_out = cad_dir + '/' + 'csvanno-long-feat-results'
colorstr = ['r', 'g', 'b', 'k', 'w', 'm', 'c', 'y']
n = 11
# specify which sequences are visualized
test_seq = [1, 4, 5, 6, 8, 2, 7, 28, 35, 11, 10, 26]
kernel_net = load_model(model_path)
for n in range(1, 45):
try:
if n == 39:
continue
f = 1
pose_vec = genfromtxt('../common/pose/pose%2.2d.txt' % n)
pose_meta = genfromtxt('../common/pose/meta%2.2d.txt' % n)
action_vec = genfromtxt('../split1/atomic/actions.txt')
action_meta = genfromtxt('../split1/atomic/meta.txt')
if not os.path.exists('res/scene%d' % n):
os.makedirs('res/scene%d' % n)
# fig, ax = plt.subplots(1)
anno_data = read_cad_annotations(annotations_dir, n)
print(anno_data.shape)
n_frames = np.max(anno_data[:, 0])
while True:
f += 10
if f > n_frames:
break
im = read_cad_frames(cad_dir, n, f)
bx, by, bp, bi = custom_interaction_features(anno_data, f, max_people=20)
# print(bx[0].shape, by[0].shape, bp[0].shape)
# print(len(bx))
# print(bx[0][:, 18:22])
anno_data_i = anno_data[anno_data[:, 0] == f, :]
n_ped = anno_data_i.shape[0]
affinity_matrix = []
for j in range(len(bx)):
# uncomment this to visualize
# plt.clf()
# ax.clear()
# ax.imshow(im)
temp = np.squeeze(kernel_net.predict_on_batch(x=[bx[j], bp[j]]))
affinity_matrix.append(temp[0:n_ped].tolist())
# uncomment this to visualize individual features
# print()
# print(np.round(temp[0:n_ped], 2))
# print(by[j][0:n_ped, 0])
# print()
# add_annotation(ax, bi[j, 2:6], 'k', 2)
for k in range(n_ped):
l = k
# uncomment this to visualize individual features
# if l is not j:
# if np.sum(bi[k, 10:]) > 0:
# if temp[l] > 0.5:
# add_annotation(ax, bi[k, 2:6], 'b', 2)
# ax.arrow(bi[k, 2], bi[k, 3], 64 * bx[k][k, 0], 64 * bx[k][k, 1], fc='b', ec='b',
# head_width=5, head_length=10)
# else:
# add_annotation(ax, bi[k, 2:6], 'r', 2)
# ax.arrow(bi[k, 2], bi[k, 3], 64 * bx[k][k, 0], 64 * bx[k][k, 1], fc='r', ec='r',
# head_width=5, head_length=10)
# uncomment this to visualize individual features
# add_annotation(ax, bi[j, 2:6], 'k', 2)
# ax.arrow(bi[j, 2], bi[j, 3], 64*bx[j][0, 0], 64*bx[j][0, 1], fc='k', ec='k',
# head_width=5, head_length=10)
# print(bi[j, 2], bi[j, 3], 64*bx[j][0, 0], 64*bx[j][0, 1])
# plt.pause(1./2)
affinity_matrix = np.array(affinity_matrix)
affinity_matrix[np.isnan(affinity_matrix)] = 0
# try:
# print(affinity_matrix)
if n_ped == 0:
continue
af = DBSCAN(eps=0.55, metric='precomputed', min_samples=0, algorithm='auto', n_jobs=1)
af.fit(1-affinity_matrix)
# print(af.labels_)
af_labels = af.labels_
n_samples = af_labels.shape[0]
ipm = np.zeros(shape=(n_samples, n_samples))
for i1 in range(n_samples):
for i2 in range(n_samples):
ipm[i1, i2] = af_labels[i1] == af_labels[i2]
# print(ipm)
gt_pm = np.zeros(shape=(n_samples, n_samples))
for i1 in range(n_samples):
for i2 in range(n_samples):
gt_pm[i1, i2] = by[i1][i2, 0]
# print(gt_pm)
# ax.clear()
# ax.imshow(im)
# for j in range(len(bx)):
# # plt.clf()
# add_annotation(ax, bi[j, 2:6], colorstr[af_labels[j]], 2)
# plt.pause(0.01)
# plt.savefig('res/scene%d/frame%d.png' % (n, f))
## except:
# print('skipped clustering')
for ped_i in range(af_labels.shape[0]):
# print(np.sum(np.bitwise_and(anno_data[:, 0] == f, anno_data[:, 1] == ped_i+1)))
anno_data[np.bitwise_and(anno_data[:, 0] == f, anno_data[:, 1] == ped_i+1), 8] = af_labels[ped_i] + 1
# save group labels
savetxt(annotations_dir_out + '/' + 'data_%2.2d.txt' % n, anno_data, delimiter=',')
print(annotations_dir_out + '/' + 'data_%2.2d.txt' % n)
except:
print('skipped', n)
| mit |
MohammedWasim/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
ibell/coolprop | wrappers/Python/CoolProp/Plots/PsychChart.py | 1 | 5652 | """
This file implements a psychrometric chart for air at 1 atm
"""
import CoolProp
HAProps = CoolProp.HumidAirProp.HAProps
InlineLabel = CoolProp.Plots.Plots.InlineLabel
import matplotlib, numpy, textwrap
import_template=(
"""
import numpy, matplotlib
from CoolProp.HumidAirProp import HAProps
from CoolProp.Plots.Plots import InlineLabel
p = 101.325
Tdb = numpy.linspace(-10,60,100)+273.15
#Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
"""
)
closure_template=(
"""
matplotlib.pyplot.show()
"""
)
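# Design note (our reading of this module): each chart element below implements
# both plot(ax), which draws onto a live axis, and __str__, which emits the
# equivalent plotting code; the __main__ block stitches the __str__ outputs into
# PsychScript.py and re-executes it as a self-contained script.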
Tdb = numpy.linspace(-10,60,100)+273.15
p = 101.325
class PlotFormatting(object):
def plot(self,ax):
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
def __str__(self):
return textwrap.dedent("""
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
""")
class SaturationLine(object):
def plot(self,ax):
w = [HAProps('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
def __str__(self):
return textwrap.dedent("""
# Saturation line
w = [HAProps('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
"""
)
class HumidityLabels(object):
def __init__(self,RH_values,h):
self.RH_values = RH_values
self.h = h
def plot(self,ax):
xv = Tdb #[K]
for RH in self.RH_values:
yv = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAProps('W','P',p,'H',self.h,'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+str(RH*100)+'%'
#Make a temporary label to get its bounding box
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
def __str__(self):
return textwrap.dedent("""
xv = Tdb #[K]
for RH in {RHValues:s}:
yv = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAProps('W','P',p,'H',{h:f},'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+str(RH*100)+'%'
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
""".format(h=self.h, RHValues=str(self.RH_values))
)
class HumidityLines(object):
def __init__(self,RH_values):
self.RH_values = RH_values
def plot(self,ax):
for RH in self.RH_values:
w = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
def __str__(self):
return textwrap.dedent("""
# Humidity lines
RHValues = {RHValues:s}
for RH in RHValues:
w = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
""".format(RHValues=str(self.RH_values))
)
class EnthalpyLines(object):
def __init__(self,H_values):
self.H_values = H_values
def plot(self,ax):
for H in self.H_values:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAProps('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAProps('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAProps('W','H',H,'P',p,'R',1.0)
w0 = HAProps('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
def __str__(self):
return textwrap.dedent("""
# Humidity lines
for H in {HValues:s}:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAProps('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAProps('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAProps('W','H',H,'P',p,'R',1.0)
w0 = HAProps('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
""".format(HValues=str(self.H_values))
)
if __name__=='__main__':
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
SL = SaturationLine()
SL.plot(ax)
RHL = HumidityLines([0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
RHL.plot(ax)
RHLabels = HumidityLabels([0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9], h=65)
RHLabels.plot(ax)
HL = EnthalpyLines(range(-20,100,10))
HL.plot(ax)
PF = PlotFormatting()
PF.plot(ax)
matplotlib.pyplot.show()
fp = open('PsychScript.py','w')
for chunk in [import_template,SL,RHL,HL,PF,RHLabels,closure_template]:
fp.write(str(chunk))
fp.close()
execfile('PsychScript.py')
| mit |
ulno/micropython-extra-ulno | examples/plot_log/liveplot2d.py | 2 | 4391 | #!/usr/bin/env python3
# tail a file into a plot
#
# take parameter of filename
#
# Resources:
# - http://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib
# - from: https://lethain.com/tailing-in-python/
#
# Author: ulno
# Create date: 2017-04-30
#
import time
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
import numbers
SLEEP_INTERVAL = 0.02
minx = None
maxx = None
miny = None
maxy = None
interval_start = None
average_sum = 0
last_average = None
average_count = 0
point_counter = 0
def add_point(x, y, c="blue"):
global minx, maxx, miny, maxy, interval_start, average_sum, average_count
if minx is None:
minx = x
maxx = x + 1
# TODO: better init value for max
miny = y
maxy = y + 1
interval_start = x
else:
minx = min(minx, x)
maxx = max(maxx, x)
miny = min(miny, y)
maxy = max(maxy, y)
plt.axis([minx, maxx, miny, maxy])
plt.scatter(x, y, c=c)
def draw(x, y, interval, diff=False, c="blue"):
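# How this plots (as implemented below): with an averaging `interval`, y values
# are accumulated until x passes the end of the current interval, and the mean y
# is plotted at the interval midpoint (with diff=True, the change in that mean
# relative to the previous interval is plotted instead). Without an interval,
# every point is plotted directly.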
global point_counter, last_average, average_sum, average_count, interval_start
if not isinstance(x, numbers.Number) or not isinstance(y, numbers.Number):
return # don't draw if one is not a number
if interval is not None:
if interval_start is None and x is not None:
interval_start = x
if x > interval_start + interval:
current_average = last_average
if average_count > 0:
current_average = average_sum / average_count
point_counter += 1
# print("x", x,"avg", average_sum, "count", average_count, current_average)
if current_average is not None:
if diff and last_average is not None:
add_point(interval_start + interval / 2.0,
current_average - last_average, c=c)
else:
add_point(interval_start + interval / 2.0, current_average, c=c)
last_average = current_average
average_sum = 0
average_count = 0
interval_start += interval
average_sum += y
average_count += 1
else:
add_point(x, y, c=c)
point_counter += 1
def parse_lineas_tuple(l):
s = l.strip().split()
if len(s) >= 2:
retval = []
for i in s:
try:
conv = float(i)
except:
conv = None
retval.append(conv)
return retval
else:
return None
def init(fin, column, interval, diff=False, c="blue"):
global point_counter
counter = 0
for l in fin:
t = parse_lineas_tuple(l)
if t is not None and len(t) > column:
draw(t[0], t[column], interval, diff=diff, c=c)
counter += 1
if counter % 1000 == 0:
print("Read", counter, "lines,", point_counter, "valid points.")
plt.ion()
plt.show()
def tail(fin):
"Listen for new lines added to file."
while True:
where = fin.tell()
line = fin.readline()
if not line:
plt.pause(SLEEP_INTERVAL)
fin.seek(where)
else:
yield line
def main():
p = OptionParser("usage: liveplot2d.py file [color [column [averaging_interval [diff]]]]")
(options, args) = p.parse_args()
if len(args) < 1:
p.error("must at least specify a file to watch")
with open(args[0], 'r') as fin:
column = 1
color = "blue"
interval = None
if len(args) > 1:
color = args[1]
if len(args) > 2:
column = int(args[2])
if len(args) > 3:
interval = float(args[3])
if len(args) > 4:
diff = args[4].lower().startswith("diff")
else:
diff = False
init(fin, column, interval, diff=diff, c=color)
print("Read", point_counter, "valid points.")
print("Reached file end. If valid points are 0,\nno graphics is shown until there are valid points.")
for line in tail(fin):
p = parse_lineas_tuple(line)
if p is not None:
draw(p[0], p[column], interval, diff=diff, c=color)
plt.pause(SLEEP_INTERVAL)
if __name__ == '__main__':
main()
| mit |
henriquemiranda/yambo-py | tutorial/mos2/proj_mos2.py | 2 | 2428 | from __future__ import print_function, division
#
# Author: Henrique Pereira Coutada Miranda
# Example script to plot the weight of the atomic species in the bandstructure
#
from qepy import *
import sys
import argparse
import matplotlib.pyplot as plt
folder = 'bands'
npoints = 20
p = Path([ [[0.0, 0.0, 0.0],'G'],
[[0.5, 0.0, 0.0],'M'],
[[1./3,1./3, 0.0],'K'],
[[0.0, 0.0, 0.0],'G']], [int(npoints*2),int(npoints),int(sqrt(5)*npoints)])
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-c' ,'--calc', action="store_true", help='Project orbitals')
parser.add_argument('-a' ,'--analyse', action="store_true", help='Analyse data')
parser.add_argument('-p1' ,'--plot_size', action="store_true", help='Analyse data')
parser.add_argument('-p2' ,'--plot_orbital', action="store_true", help='Analyse data')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if args.calc:
f = open('proj.in','w')
projwfc = ProjwfcIn('mos2')
projwfc.write(folder=folder)
projwfc.run(folder=folder)
if args.analyse:
pxml = ProjwfcXML('mos2',path=folder)
# obtain the list of orbitals and quantum numbers
print(pxml)
print("Writting projections")
pxml.write_proj()
print("done!")
if args.plot_size:
pxml = ProjwfcXML('mos2',path=folder)
print(pxml)
# select orbitals to plot
# example1 mo, s2 and mos2
mo = list(range(16)) #list containing the indexes of all the orbitals of mo
s = list(range(16,48)) #list containing the indexes of all the orbitals of s
fig = plt.figure(figsize=(30,10))
for n,(orb,title) in enumerate(zip([mo,s,mo+s],['mo','s','mos2'])):
ax = plt.subplot(1,3,n+1)
plt.title(title)
pxml.plot_eigen(ax,path=p,selected_orbitals=orb,size=40)
ax.set_ylim([-7,6])
plt.show()
if args.plot_orbital:
pxml = ProjwfcXML('mos2',path=folder)
print(pxml)
# select orbitals to plot
# example1 mo, s2
mo = list(range(16)) #list containing the indexes of all the orbitals of mo
s = list(range(16,48)) #list containing the indexes of all the orbitals of s
fig = plt.figure(figsize=(8,10))
ax = plt.subplot(1,1,1)
pxml.plot_eigen(ax,path=p,selected_orbitals=mo,selected_orbitals_2=s,size=40,cmap='RdBu')
ax.set_ylim([-7,6])
plt.show()
| bsd-3-clause |
Ichaelus/Github-Classifier | Application/Models/ClassificationModules/metaonlyrandomforest.py | 1 | 2706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
import sklearn
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class metaonlyrandomforest(ClassificationModule):
"""A basic Random Forest Classifier"""
def __init__(self, n_estimators=250):
ClassificationModule.__init__(self, "Meta Only Random Forest", "Ensemble Learner with 250 Decision-Trees as base-classifier. Uses only our metadata.")
self.clf = RandomForestClassifier(n_estimators=n_estimators, class_weight = 'balanced')
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
self.clf = sklearn.base.clone(self.clf)
def trainOnSample(self, sample, nb_epoch=10, shuffle=True, verbose=True):
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
return self.clf.fit(readme_vec, np.expand_dims(label_index, axis=0))
def train(self, samples, nb_epoch=10, shuffle=True, verbose=True):
"""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)"""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(getLabelIndex(sample))
train_lables = np.asarray(train_lables)
train_result = self.clf.fit(train_samples, train_lables)
self.isTrained = True
return train_result
def predictLabel(self, sample):
"""Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde"""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return self.clf.predict(sample)[0]
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.clf.predict_proba(sample)[0]
return [np.argmax(prediction)] + list(prediction)
def formatInputData(self, sample):
"""Extract description and transform to vector"""
sd = getMetadataVector(sample)
# Returns numpy array which contains 1 array with features
return np.expand_dims(sd, axis=0)
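# Hypothetical usage sketch (the sample format comes from Models.FeatureProcessing
# and is assumed here, not shown in this file):
#   clf = metaonlyrandomforest()
#   clf.train(training_samples)                  # list of metadata samples
#   label = clf.predictLabel(new_sample)
#   label_and_probs = clf.predictLabelAndProbability(new_sample)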
| mit |
NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse numérique/Équations différentielles numériques/Méthode des éléments finis/hpfem2d.py | 1 | 6104 | # -*- coding: utf-8 -*-
"""
Program for generating 2D hp finite element trial functions and their
derivatives
Copyright (C) 2013 Greg von Winckel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created: Tue Sep 24 08:59:11 MDT 2013
Last updated: Sat Oct 5 10:15:09 MDT 2013
"""
import os
import numpy as np
import orthopoly as op
from nodes import nodes, vertex_indices, edge_indices, interior_indices
from pkdo import pkdo
from triquad import triquad
from numpy.linalg import inv
class hpfem2d(object):
def __init__(self, p):
"""
Form a basis generating object based on the (p+1)(p+2)/2
interpolation nodes
"""
self.p = p
# Construct the interpolation nodes
self.x, self.y = nodes(self.p)
# Form the PKDO Vandermonde on the nodes
V, _, _ = pkdo(self.p, self.x, self.y)
# Compute the inverse of the interpolation Vandermonde
self.Vi = inv(V)
def getInteriorTrial(self, q):
"""
Evaluate the nodal interpolating functions and their x and y
derivatives on a quadrature grid of q^2 points
"""
# Generate interior quadrature grid
xq, yq, wq = triquad(q)
# Compute Vandermondes PKDO polynomials and their derivatives on
# quadrature grid
V, Vx, Vy = pkdo(self.p, xq, yq)
# Trial functions
L = np.dot(V, self.Vi)
# x derivative of trial functions
Lx = np.dot(Vx, self.Vi)
# y derivative of trial functions
Ly = np.dot(Vy, self.Vi)
return xq, yq, wq, L, Lx, Ly
def getBoundaryTrial(self, q, edge):
"""
Evaluate the nodal interpolating functions along one of the edges
using q Legendre Gauss nodes
"""
# Gauss quadrature recursion coefficients
a, b = op.rec_jacobi(q, 0, 0)
# Legendre Gauss nodes and weights
t, wt = op.gauss(a, b)
# Affine map of [-1,1] to the appropriate triangle edge
xdict = {0: t, 1: -t, 2: -np.ones(q)}
ydict = {0: -np.ones(q), 1: t, 2: -t}
# Evaluate PKDO Vandermonde on the quadrature grid
V, _, _ = pkdo(self.p, xdict[edge], ydict[edge])
# Evaluate 2D Lagrange interpolants edge
L = np.dot(V, self.Vi)
return xdict[edge], ydict[edge], wt, L
def manufactured_solution(expression):
""" Evaluate a string for the exact symbolic solution and
create numerical function handles for all of the terms
needed to reconstruct it by solving the BVP """
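# Worked example of the method of manufactured solutions: choosing
# u = sin(pi*x)*sin(pi*y) gives ux = pi*cos(pi*x)*sin(pi*y),
# uy = pi*sin(pi*x)*cos(pi*y), and forcing f = -u_xx - u_yy
# = 2*pi**2*sin(pi*x)*sin(pi*y); solving -laplace(u) = f with matching
# boundary data should then reproduce u.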
from sympy import *
# Define symbolic variables for manufactured solution
x, y = symbols('x,y')
# Exact symbolic solution
u = eval(expression)
# Partial derivatives
ux = diff(u, x)
uy = diff(u, y)
# symbolic forcing function
f = -diff(ux, x) - diff(uy, y)
# Return list of numerical function handles
return [lambdify([x, y], fun, "numpy") for fun in [u, ux, uy, f]]
if __name__ == '__main__':
"""
Solve the Poisson equation with the manufactured-solution forcing on the
lower right triangle, with Dirichlet (edge 0), Neumann (edge 1), and Robin
(edge 2) boundary conditions
"""
from scipy.linalg import solve
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Max polynomial order
p = 20
# Indices of interior and boundary points
idex = interior_indices(p)
edex = edge_indices(p)
# Instantiate FEM basis generator for this order
FEM = hpfem2d(p)
# Get function handles for the manufactured solution
u, ux, uy, f = manufactured_solution("cos(pi*(x-y)) + sin(pi*(x+y))")
# Get interior points and basis functions
xq, yq, wq, L, Lx, Ly = FEM.getInteriorTrial(p)
# Get boundary quadrature and basis functions
x1, y1, w1, L1 = FEM.getBoundaryTrial(p, 1)
x2, y2, w2, L2 = FEM.getBoundaryTrial(p, 2)
# Inner product over the elemental interior
def iprod(A, B):
return np.dot(wq * A.T, B)
# Interpolation points
x, y = FEM.x, FEM.y
# Total number of nodes
N = len(x)
# Evaluate the exact solution on edge 0 - including the vertex nodes
# because this side has a Dirichlet condition
e0 = [0, 1] + edex[0]
a = u(x[e0], y[e0])
# Evaluate the normal derivative on edge 1
b = ux(x1, y1) + uy(x1, y1)
# Evaluate the solution plus normal derivative on edge 2
c = u(x2, y2) - ux(x2, y2)
# Compute load vector
fhat = iprod(L, f(xq, yq))
# Integrate inhomogeneous boundary terms against test functions
bhat = np.dot(w1 * L1.T, b)
chat = np.dot(w2 * L2.T, c)
# Surface matrix for Robin condition on edge 2
S = np.dot(w2 * L2.T, L2)
# Stiffness matrix
K = iprod(Lx, Lx) + iprod(Ly, Ly)
# Left-hand-side
LHS = K + S
# Computed solution
psi = np.zeros(N)
# Set Dirichlet data
psi[e0] = a
# Right-hand-side
rhs = fhat + bhat + chat - np.dot(LHS, psi)
# Solve for interior points, and points on edges 1 and 2, and vertex 2
dex = idex + edex[1] + edex[2] + [2]
psi[dex] = solve(LHS[dex, :][:, dex], rhs[dex])
fig = plt.figure()
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122, projection='3d')
ax1.plot_trisurf(x, y, psi, cmap=plt.cm.CMRmap)
ax1.set_title('Computed Solution')
ax2.plot_trisurf(x, y, u(x, y), cmap=plt.cm.CMRmap)
ax2.set_title('Exact Solution')
plt.show()
os.system("pause")
| gpl-3.0 |
voxlol/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
tgy/facedetect | script/plot_mblbp.py | 1 | 2423 | #!/usr/bin/env python3
'''Visualize randomly chosen mblbp features in a given window'''
import random
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
import matplotlib.image as mpimg
from PIL import Image
COLORS = ['#1a535c', '#4ecdc4', '#ff6b6b', '#ffe66d', '#ffe66d',
'#ff6b6b', '#4ecdc4', '#1a535c']
def plot_animated_mblbp(window_w, window_h):
dpi = 96
img = Image.open('gfx/tgy.jpg').convert('LA')
fig = plt.figure(figsize=(dpi / 40, dpi / 40), dpi=dpi, frameon=False)
ax = plt.axes(xlim=(0, 20), ylim=(0, 20))
ax.imshow(img, interpolation='nearest', cmap=plt.get_cmap('gray'),
extent=[0, 20, 20, 0], alpha=0.7)
ax.set_ylim(ax.get_ylim()[::-1]) # invert y-axis
ax.xaxis.tick_top() # move x-axis to the top
ax.xaxis.set_ticks(range(1, 21))
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticks(range(1, 21))
ax.yaxis.set_ticklabels([])
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
for tic in ax.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.grid(True, which='both', linestyle='-')
for spine in ax.spines:
ax.spines[spine].set_visible(False)
features = []
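# Enumerate every candidate MB-LBP block that fits in the window. Block widths
# and heights are multiples of 3 because, under the usual MB-LBP definition
# (assumed here), each block is divided into a 3x3 grid of equal-sized cells.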
for block_w in range(3, 10, 3):
for block_h in range(3, 10, 3):
for offset_x in range(window_w - block_w + 1):
for offset_y in range(window_h - block_h + 1):
feature = {
'block_w': block_w,
'block_h': block_h,
'offset_x': offset_x,
'offset_y': offset_y,
}
features.append(feature)
random.shuffle(features)
sample = random.sample(features, 6)
for i, feature in enumerate(sample):
rectangle = patches.Rectangle(
(feature['offset_x'], feature['offset_y']),
feature['block_w'], feature['block_h'],
facecolor=COLORS[i], linewidth=1, alpha=0.4,
)
ax.add_patch(rectangle)
plt.savefig('gfx/mblbp.svg', interpolation='nearest')
plt.show()
def main():
window_w = 20
window_h = 20
plot_animated_mblbp(window_w, window_h)
if __name__ == '__main__':
main()
| mit |
cmap/cmapPy | cmapPy/math/tests/test_fast_cov.py | 1 | 17355 | import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.math.fast_cov as fast_cov
import numpy
import tempfile
import os
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestFastCov(unittest.TestCase):
@staticmethod
def build_standard_x_y():
x = numpy.array([[1,2,3], [5,7,11]], dtype=float)
logger.debug("x: {}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[13, 17, 19], [23, 29, 31]], dtype=float)
logger.debug("y: {}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
@staticmethod
def build_nan_containing_x_y():
x = numpy.array([[1,numpy.nan,2], [3,5,7], [11,13,17]], dtype=float)
logger.debug("x:\n{}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[19, 23, 29], [31, 37, 41], [numpy.nan, 43, 47]], dtype=float)
logger.debug("y:\n{}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
def test_validate_inputs(self):
shape = (3,2)
#happy path just x
x = numpy.zeros(shape)
fast_cov.validate_inputs(x, None, None)
x = numpy.zeros(1)
fast_cov.validate_inputs(x, None, None)
#unhappy path just x, x does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, None, None)
logger.debug("unhappy path just x, x does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
#unhappy path x does not have shape attribute, y does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, 3, None)
logger.debug("unhappy path x does not have shape attribute, y does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
self.assertIn("y needs to be numpy array-like", str(context.exception))
#happy path x and y
x = numpy.zeros(shape)
y = numpy.zeros(shape)
fast_cov.validate_inputs(x, y, None)
#happy path y different shape from x
y = numpy.zeros((3,1))
fast_cov.validate_inputs(x, y, None)
#unhappy path y different shape from x, invalid axis
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y.T, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x.T, y, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
#happy path with x, destination
x = numpy.zeros(shape)
dest = numpy.zeros((shape[1], shape[1]))
fast_cov.validate_inputs(x, None, dest)
#unhappy path with x, destination wrong size
dest = numpy.zeros((shape[1]+1, shape[1]))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, None, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x - context.exception: {}".format(context.exception))
self.assertIn("x and destination provided", str(context.exception))
self.assertIn("destination must have shape matching", str(context.exception))
#happy path with x, y, destination
x = numpy.zeros(shape)
y = numpy.zeros((shape[0], shape[1]+1))
dest = numpy.zeros((shape[1], shape[1]+1))
fast_cov.validate_inputs(x, y, dest)
#unhappy path x, y, destination wrong size
dest = numpy.zeros((shape[1], shape[1]+2))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x, y - context.exception: {}".format(context.exception))
self.assertIn("x, y, and destination provided", str(context.exception))
self.assertIn("destination must have number of", str(context.exception))
def test_fast_cov_check_validations_run(self):
#unhappy path check that input validation checks are run
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.fast_cov(None, None)
logger.debug("unhappy path check that input validation checks are run - context.exception: {}".format(context.exception))
def test_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_standard_x_y()
ex = numpy.cov(x, rowvar=False)
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path just x, uses destination
dest = numpy.zeros((x.shape[1], x.shape[1]))
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path just x, uses destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination which is a different type
dest = dest.astype(numpy.float16)
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path, just x, uses destination which is a different type - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination that is a numpy.memmap
outfile = tempfile.mkstemp()
logger.debug("happy path, just x, uses destination which is a numpy.memmap - outfile: {}".format(outfile))
dest = numpy.memmap(outfile[1], dtype="float16", mode="w+", shape=ex.shape)
dest_array = numpy.asarray(dest)
r = fast_cov.fast_cov(x, destination=dest_array)
dest.flush()
logger.debug(" - r: {}".format(r))
os.close(outfile[0])
os.remove(outfile[1])
#happy path just x, transposed
ex = numpy.cov(x, rowvar=True)
logger.debug("happy path just x, transposed, expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_standard_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x, y, and destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x, y, and destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path x and y, other direction
combined = numpy.hstack([x.T, y.T])
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("happy path x and y, other direction, raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T, y.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, _ = TestFastCov.build_standard_x_y()
y = numpy.array([[13, 17, 19, 23, 41], [23, 29, 31, 37, 43]])
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex: {}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x and y different shapes, using destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x and y different shapes, using destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
def test_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
def test_calculate_non_mask_overlaps(self):
x = numpy.zeros((3,3))
x[0,1] = numpy.nan
x = numpy.ma.array(x, mask=numpy.isnan(x))
logger.debug("happy path x has 1 nan - x:\n{}".format(x))
r = fast_cov.calculate_non_mask_overlaps(x.mask, x.mask)
logger.debug("r:\n{}".format(r))
expected = numpy.array([[3,2,3], [2,2,2], [3,2,3]], dtype=int)
self.assertTrue(numpy.array_equal(expected, r))
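        # Editorial note (an assumption about the intended semantics): the
        # expected matrix above counts, for each pair of columns, the rows in
        # which neither entry is masked. With a single NaN in column 1, any
        # pair involving column 1 has 2 usable rows and every other pair has
        # 3. The same counts can be reproduced directly with, e.g.:
        #   numpy.dot((~x.mask).astype(int).T, (~x.mask).astype(int))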
def test_nan_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_nan_containing_x_y()
ex_with_nan = numpy.cov(x, rowvar=False)
logger.debug("expected with nan's - ex_with_nan:\n{}".format(ex_with_nan))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex_with_nan)
self.assertTrue(numpy.allclose(ex_with_nan[non_nan_locs], r[non_nan_locs]))
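        # Under numpy.cov, row/column 1 of the expected matrix is NaN because
        # column 1 of x has a NaN in row 0. The loop below rebuilds those
        # entries pairwise, using only the rows (1 and 2) where both columns
        # are observed, which is the behaviour nan_fast_cov is expected to
        # match.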
check_nominal_nans = []
u = x[1:, 1]
for i in range(3):
t = x[1:, i]
c = numpy.cov(t, u, bias=False)[0,1]
check_nominal_nans.append(c)
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
self.assertTrue(numpy.allclose(check_nominal_nans, r[:, 1]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, :]))
def test_nan_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_nan_containing_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex:\n{}".format(ex))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, t = TestFastCov.build_nan_containing_x_y()
y = numpy.zeros((t.shape[0], t.shape[1]+1))
y[:, :t.shape[1]] = t
y[:, t.shape[1]] = [53, 59, 61]
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex:\n{}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2,3]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_all_nan(self):
x = numpy.zeros(3)
x[:] = numpy.nan
x = x[:, numpy.newaxis]
logger.debug("x:\n{}".format(x))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
self.assertEqual(0, numpy.sum(numpy.isnan(r)))
def test_nan_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_nan_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.nan_fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
        r = fast_cov.nan_fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
| bsd-3-clause |
drpngx/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 30 | 60826 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
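      # With its learning-rate multiplier set to 0, the language embedding is
      # expected to stay at its 0.1 initializer after training, while the
      # wire embedding (no multiplier) should have moved away from it.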
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
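      # The strided_slice below keeps every row (end_mask=1 makes the row
      # slice run to the end, overriding the -1) and columns 1:2 of the
      # [batch, 2] probabilities tensor, i.e. it extracts the positive-class
      # probability column.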
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
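# Editor's illustrative sketch (hypothetical helper, not part of the original
# test file): boston_input_fn builds constant tensors, so their static shapes
# can be inspected directly; the Boston housing data has 506 rows with 13
# features and a single target column.
def _example_boston_input_fn_shapes():
  features, labels = boston_input_fn()
  return features.shape.as_list(), labels.shape.as_list()  # [506, 13], [506, 1]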
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ibayer/fastFM-fork | fastFM/validation.py | 1 | 10668 | # Static versions of non-core sklearn.utils functions.
# Placed here since they are subject to change.
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
import scipy.sparse as sparse
from functools import wraps
def _check_matrix_is_sparse(func):
"""
Check that the input is a scipy sparse matrix and raise a TypeError otherwise.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if 'accept_sparse' in kwargs and not sparse.isspmatrix(args[0]):
raise TypeError('A dense matrix was passed in, but sparse '
'data is required.')
result = func(*args, **kwargs)
return result
return wrapper
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
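# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): a COO matrix that is not in the accepted formats is converted to
# the first listed format and cast to the requested dtype.
def _example_ensure_sparse_format():
    X = sparse.coo_matrix(np.eye(3))
    X_csr = _ensure_sparse_format(X, accept_sparse=['csr'], dtype=np.float64,
                                  order=None, copy=False,
                                  force_all_finite=True)
    return X_csr.format, X_csr.dtype  # ('csr', dtype('float64'))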
def assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
@_check_matrix_is_sparse
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
if sparse.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array
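# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): a plain nested list is promoted to a 2-D ndarray; the integer dtype
# is preserved because only object dtype triggers the float conversion.
def _example_check_array():
    X = check_array([[1, 2], [3, 4]], dtype="numeric")
    return X.shape, X.dtype.kind  # (2, 2), 'i'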
def check_consistent_length(x1, x2):
return x1.shape[0] == x2.shape[0]
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
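# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): integers and existing RandomState instances are both normalised to
# a RandomState object, and an existing instance is returned unchanged.
def _example_check_random_state():
    rng = check_random_state(0)
    return rng is check_random_state(rng)  # True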
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
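# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): lists report their length and arrays their leading dimension.
def _example_num_samples():
    return _num_samples([1, 2, 3]), _num_samples(np.zeros((4, 2)))  # (3, 4)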
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/utils/testing.py | 29 | 25405 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
import unittest
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
from nose.tools import raises
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal", "SkipTest"]
_dummy = unittest.TestCase('__init__')
assert_equal = _dummy.assertEqual
assert_not_equal = _dummy.assertNotEqual
assert_true = _dummy.assertTrue
assert_false = _dummy.assertFalse
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_in = _dummy.assertIn
assert_not_in = _dummy.assertNotIn
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_less_equal = _dummy.assertLessEqual
assert_greater_equal = _dummy.assertGreaterEqual
try:
assert_raises_regex = _dummy.assertRaisesRegex
except AttributeError:
# Python 2.7
assert_raises_regex = _dummy.assertRaisesRegexp
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
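# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): assert_warns returns the wrapped function's result once the
# expected warning class has been observed.
def _example_assert_warns():
    def noisy():
        warnings.warn("this API is deprecated", UserWarning)
        return 42
    return assert_warns(UserWarning, noisy)  # 42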
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Parameters
----------
category : warning class, defaults to Warning.
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager and decorator.
This class allows one to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, defaults to Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter("ignore", self.category)
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions.
Parameters
----------
exceptions : exception or tuple of exception
The expected exception class, or a tuple of exception classes.
function : callable
Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kw : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
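# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): assert_raise_message passes silently when the expected substring
# appears in the raised error message.
def _example_assert_raise_message():
    def fail():
        raise ValueError("negative values are not allowed")
    assert_raise_message(ValueError, "not allowed", fail)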
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
"MultiOutputRegressor", "MultiOutputClassifier",
"OutputCodeClassifier", "OneVsRestClassifier",
"RFE", "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator) and
c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
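# Editor's illustrative sketch (hypothetical helper, not part of the original
# module; it only assumes the installed scikit-learn package is importable,
# which this module already requires): list the names of every classifier
# discovered by crawling the package.
def _example_all_estimators():
    return [name for name, _ in all_estimators(type_filter='classifier')]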
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for which random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed."""
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing.
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
impact only OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
errors on interactively defined functions. It is therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
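# Editor's illustrative sketch (hypothetical helper, not part of the original
# module): TempMemmap dumps the data to a temporary folder and yields a
# read-only memory-mapped view of it.
def _example_temp_memmap():
    data = np.arange(10, dtype=np.float64)
    with TempMemmap(data) as data_read_only:
        return np.asarray(data_read_only).sum()  # 45.0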
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
class _named_check(object):
"""Wraps a check to show a useful description
Parameters
----------
check : function
Must have ``__name__`` and ``__call__``
arg_text : str
A summary of arguments to the check
"""
# Setting the description on the function itself can give incorrect results
# in failing tests
def __init__(self, check, arg_text):
self.check = check
self.description = ("{0[1]}.{0[3]}:{1.__name__}({2})".format(
inspect.stack()[1], check, arg_text))
def __call__(self, *args, **kwargs):
return self.check(*args, **kwargs)
| bsd-3-clause |
abimannans/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering with a callable affinity
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
# With a specified number of clusters just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
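# Editor's illustrative sketch (hypothetical helper, not part of the original
# test module): two labellings that differ only by a permutation of the label
# values are considered identical by assess_same_labelling.
def _example_assess_same_labelling():
    assess_same_labelling(np.array([0, 0, 1]), np.array([1, 1, 0]))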
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
# test that return_distance, when set to True, gives the same
# output on both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
# Non-regression check for a bug triggered when a connectivity that does not
# support item assignment is provided with more than one connected component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
# When n_clusters is small, the full tree should be built,
# that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
# When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
# we should stop early, once only n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
colour-science/colour | colour/plotting/tm3018/tests/test_report.py | 1 | 3449 | # -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.plotting.tm3018.report` module.
"""
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.colorimetry import SDS_ILLUMINANTS
from colour.plotting.tm3018.report import (
plot_single_sd_colour_rendition_report_full,
plot_single_sd_colour_rendition_report_intermediate,
plot_single_sd_colour_rendition_report_simple,
plot_single_sd_colour_rendition_report)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestPlotSingleSdColourRenditionReportFull',
'TestPlotSingleSdColourRenditionReportIntermediate',
'TestPlotSingleSdColourRenditionReportSimple',
'TestPlotSingleSdColourRenditionReport'
]
class TestPlotSingleSdColourRenditionReportFull(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_full` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendition_report_full(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_full` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_full(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReportIntermediate(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_intermediate` definition unit tests
methods.
"""
def test_plot_single_sd_colour_rendition_report_intermediate(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_intermediate` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_intermediate(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReportSimple(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_simple` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendition_report_simple(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_simple` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_simple(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReport(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendition_report(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
nrjl/GPN | plot_statruns.py | 1 | 1555 | import numpy as np
import matplotlib.pyplot as plt
import pickle
from Tkinter import Tk
from tkFileDialog import askopenfilename, askdirectory
from test_data import ObsObject
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
data_dir = askdirectory() # show an "Open" dialog box and return the path to the selected directory
with open(data_dir+'/wrms.pkl', 'rb') as fh:
wrms_results = pickle.load(fh) # Dimensions n_learners, n_queries+1, n_trials
with open(data_dir+'/true_pos.pkl', 'rb') as fh:
true_pos_results = pickle.load(fh)
with open(data_dir+'/selected_error.pkl', 'rb') as fh:
selected_error = pickle.load(fh)
with open(data_dir+'/obs.pkl', 'rb') as fh:
obs_array = pickle.load(fh)
names = [l['name'] for l in obs_array]
n_queries = wrms_results.shape[1]-1
mean_err = np.mean(wrms_results, axis=2)
std_err = np.std(wrms_results, axis=2)
f0, ax0 = plt.subplots()
hl = ax0.plot(np.arange(n_queries+1), np.mean(wrms_results, axis=2).T)
f0.legend(hl, names)
ax0.set_title('Weighted RMSE')
f1, ax1 = plt.subplots()
hl1 = ax1.plot(np.arange(n_queries+1), np.mean(true_pos_results, axis=2).T/15.0)
f1.legend(hl1, names)
ax1.set_title('True positive selections')
f2, ax2 = plt.subplots()
hl2 = ax2.plot(np.arange(n_queries+1), np.mean(selected_error, axis=2).T)
f2.legend(hl2, names)
ax2.set_title('RMSE of best paths')
# hl = []
# for i in range(mean_err.shape[0]):
# hl.append(plt.errorbar(np.arange(mean_err.shape[1]), mean_err[i,:], yerr=std_err[i, :]))
# plt.legend(hl, names)
plt.show() | mit |
jm-begon/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
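# Illustrative aside (added; not part of the original example): the claim in
# the docstring that the cosine distance is invariant to scaling can be
# checked directly -- a waveform and a scaled copy of it lie at cosine
# distance ~0, e.g.:
#     v = np.array([[1.0, 2.0, 3.0]])
#     pairwise_distances(v, 3 * v, metric="cosine")   # ~[[0.0]]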
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
anurag313/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
    dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
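# Hedged usage sketch (added for illustration; not part of the original
# module). All names and values below are made up. It exercises fit()
# followed by predict() on synthetic data with an RBF kernel.
def _demo_kernel_ridge_predict():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = X[:, 0] - 2 * X[:, 1]
    model = KernelRidge(alpha=0.1, kernel="rbf", gamma=0.5)
    model.fit(X, y)
    X_new = rng.randn(5, 3)
    return model.predict(X_new)  # array of 5 predicted target values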
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/manifold/locally_linear.py | 206 | 25061 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
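# Hedged usage sketch (added for illustration; not part of the original
# module): reconstruct each point from two of its neighbours and check that
# the barycenter weights sum to one, as stated in the docstring.
def _demo_barycenter_weights():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 3)                        # 6 samples in 3-D
    ind = np.array([[1, 2], [0, 2], [0, 1],
                    [4, 5], [3, 5], [3, 4]])  # 2 neighbour indices per sample
    B = barycenter_weights(X, X[ind])         # shape (6, 2)
    return np.allclose(B.sum(axis=1), 1.0)    # True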
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I so that M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
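# Hedged usage sketch (added for illustration; not part of the original
# module): embed a small synthetic 3-D point cloud into 2 dimensions with
# the standard LLE method.
def _demo_locally_linear_embedding():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 3)
    Y, err = locally_linear_embedding(X, n_neighbors=6, n_components=2,
                                      method='standard', random_state=0)
    return Y.shape, err                # ((40, 2), reconstruction error)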
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (1 + (n_components + 1) / 2)``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
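# Hedged usage sketch (added for illustration; not part of the original
# module): fit the estimator on training data, then project unseen points
# with transform(), which uses barycenter weights of each new point's
# nearest training neighbours.
def _demo_lle_out_of_sample():
    rng = np.random.RandomState(0)
    X_train = rng.rand(50, 3)
    X_new = rng.rand(5, 3)
    lle = LocallyLinearEmbedding(n_neighbors=6, n_components=2)
    lle.fit(X_train)
    return lle.transform(X_new).shape  # (5, 2)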
| bsd-3-clause |
boland1992/SeisSuite | seissuite/spectrum/S_spectrum.py | 6 | 14461 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import glob
import scipy
import datetime
import numpy as np
import datetime as dt
import multiprocessing as mp
import matplotlib.pyplot as plt
from numpy.lib.stride_tricks import as_strided
from numpy.fft import rfft, irfft
from obspy import read_inventory
from scipy import signal
from obspy import read
print "This spectrum script is running ... "
#PICKLE_PATH = '/home/boland/Desktop/XCORR-STACK_01.08.1999-10.06.2000\
#_datalesspaz.part.pickle'
#xc = pscrosscorr.load_pickled_xcorr(PICKLE_PATH)
# optimizing time-scale: max time = max distance / vmin (vmin = 2.5 km/s)
#maxdist = max([xc[s1][s2].dist() for s1, s2 in xc.pairs()])
#maxt = min(CROSSCORR_TMAX, maxdist / 2.5)
#plot distance plot of cross-correlations
#xc.plot(plot_type='distance', xlim=(-maxt, maxt),
# outfile="/home/boland/Desktop/something1342.png", showplot=False)
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
#plt.figure()
#plt.plot(f, Pxx_spec, '-' )
#plt.show()
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
    except Exception:
        # date could not be parsed; sort such entries first
        return datetime.datetime.min, stat_name
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
folder_path = '/home/iese/Documents/Ben/UNAM/INPUT/DATA'
extension = 'msd'
paths_list = paths(folder_path, extension)
#print "paths list: \n"
#print paths_list
t0_total = datetime.datetime.now()
figs_counter = 0
fig1 = plt.figure(figsize=(15,10))
ax1 = fig1.add_subplot(111)
ax1.set_title("Seismic Waveform Power Density Spectrum\n{}".format('UNAM | 2015'))
ax1.set_xlabel('Frequency (Hz)')
ax1.set_ylabel('Power Density Spectrum (V RMS)')
ax1.set_xlim([0,40])
ax1.grid(True, axis='both', color='gray')
ax1.set_autoscaley_on(True)
ax1.set_yscale('log')
for s in paths_list:
try:
split_path = s.split('/')
stat_info = split_path[-1][:-6]
net = stat_info.split('.')[0]
year = split_path[-2].split('-')[0]
t0 = datetime.datetime.now()
st = read(s)
t1 = datetime.datetime.now()
# select only Z component
tr = st.select(component='Z')[0]
fs = tr.stats.sampling_rate
f, Pxx_spec = signal.welch(tr.data, fs, 'flattop',
nperseg=1024,
scaling='spectrum')
#column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
#plt.semilogy(f, np.sqrt(Pxx_spec))
print "time taken to import one month mseed was: ", t1-t0
# set up loop for all traces within each imported stream.
#t0 = datetime.datetime.now()
#pool = mp.Pool()
#spectra = pool.map(spectrum, st[:])
#pool.close()
#pool.join()
t1 = datetime.datetime.now()
print "time taken to calculate monthly spectra: ", t1-t0
        # Calculate weighted average spectrum for this station for this month
#spectra = np.asarray(spectra)
#search = np.where(spectra==0.)
#spectra = np.delete(spectra, search)
#spectra = np.average(spectra, axis=0)
plt.plot(f, Pxx_spec, c='k', alpha=0.1)
except Exception as error:
print error
fig1.savefig('/home/iese/Documents/GERD_PDS.svg', format='svg', dpi=300)
plt.clf()
quit()
#plt.plot(f, np.sqrt(Pxx_spec), alpha=alpha, c='k')
#plt.xlim([0,2])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_all.svg', format='svg', dpi=1000)
plt.xlim([0,1])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-1Hz.svg', format='svg', dpi=1000)
plt.xlim([0,2])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-2Hz.svg', format='svg', dpi=1000)
plt.xlim([0,3])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-3Hz.svg', format='svg', dpi=1000)
plt.xlim([0,4])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-4Hz.svg', format='svg', dpi=1000)
plt.xlim([0,5])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-5Hz.svg', format='svg', dpi=1000)
plt.xlim([0,6])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-6Hz.svg', format='svg', dpi=1000)
plt.xlim([0,7])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-7Hz.svg', format='svg', dpi=1000)
plt.xlim([0,8])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-8Hz.svg', format='svg', dpi=1000)
plt.xlim([0,9])
#plt.ylim([0,25000])
fig1.savefig('Power Density Spectrum_0-9Hz.svg', format='svg', dpi=1000)
t1_total = datetime.datetime.now()
print "total time taken to process and plot all PDS: ", t1_total-t0_total
quit()
def get_stationxml_inventories(stationxml_dir, verbose=False):
"""
Reads inventories in all StationXML (*.xml) files
of specified dir
@type stationxml_dir: unicode or str
@type verbose: bool
@rtype: list of L{obspy.station.inventory.Inventory}
"""
inventories = []
# list of *.xml files
flist = glob.glob(pathname=os.path.join(stationxml_dir, "*.xml"))
if verbose:
if flist:
print "Reading inventory in StationXML file:",
else:
s = u"Could not find any StationXML file (*.xml) in dir: {}!"
print s.format(stationxml_dir)
for f in flist:
if verbose:
print os.path.basename(f),
inv = read_inventory(f, format='stationxml')
inventories.append(inv)
if flist and verbose:
print
return inventories
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', 1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
plt.title("Frequency Density Plot of PNG Earthquake from station PMG.IU")
plt.plot(f, np.sqrt(Pxx_spec))
plt.xlim([0, 5])
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
def resample(trace, dt_resample):
"""
Subroutine to resample trace
@type trace: L{obspy.core.trace.Trace}
@type dt_resample: float
@rtype: L{obspy.core.trace.Trace}
"""
dt = 1.0 / trace.stats.sampling_rate
factor = dt_resample / dt
if int(factor) == factor:
# simple decimation (no filt because it shifts the data)
trace.decimate(int(factor), no_filter=True)
else:
# linear interpolation
tp = np.arange(0, trace.stats.npts) * trace.stats.delta
zp = trace.data
ninterp = int(max(tp) / dt_resample) + 1
tinterp = np.arange(0, ninterp) * dt_resample
trace.data = np.interp(tinterp, tp, zp)
trace.stats.npts = ninterp
trace.stats.delta = dt_resample
trace.stats.sampling_rate = 1.0 / dt_resample
#trace.stats.endtime = trace.stats.endtime + max(tinterp)-max(tp)
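# Illustrative note (added; not from the original script): resample() either
# decimates when the target interval is an integer multiple of the current
# sampling interval, or linearly interpolates otherwise, e.g. for a 100 Hz
# obspy Trace `tr`:
#     resample(tr, dt_resample=0.1)    # integer factor 10 -> decimated to 10 Hz
#     resample(tr, dt_resample=0.045)  # non-integer factor -> linear interpolation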
def moving_avg(a, halfwindow, mask=None):
"""
Performs a fast n-point moving average of (the last
dimension of) array *a*, by using stride tricks to roll
a window on *a*.
Note that *halfwindow* gives the nb of points on each side,
so that n = 2*halfwindow + 1.
If *mask* is provided, values of *a* where mask = False are
skipped.
Returns an array of same size as *a* (which means that near
the edges, the averaging window is actually < *npt*).
"""
# padding array with zeros on the left and on the right:
# e.g., if halfwindow = 2:
# a_padded = [0 0 a0 a1 ... aN 0 0]
# mask_padded = [F F ? ? ? F F]
if mask is None:
mask = np.ones_like(a, dtype='bool')
zeros = np.zeros(a.shape[:-1] + (halfwindow,))
falses = zeros.astype('bool')
a_padded = np.concatenate((zeros, np.where(mask, a, 0), zeros), axis=-1)
mask_padded = np.concatenate((falses, mask, falses), axis=-1)
# rolling window on padded array using stride trick
#
# E.g., if halfwindow=2:
# rolling_a[:, 0] = [0 0 a0 a1 ... aN]
# rolling_a[:, 1] = [0 a0 a1 a2 ... aN 0 ]
# ...
# rolling_a[:, 4] = [a2 a3 ... aN 0 0]
npt = 2 * halfwindow + 1 # total size of the averaging window
rolling_a = as_strided(a_padded,
shape=a.shape + (npt,),
strides=a_padded.strides + (a.strides[-1],))
rolling_mask = as_strided(mask_padded,
shape=mask.shape + (npt,),
strides=mask_padded.strides + (mask.strides[-1],))
# moving average
n = rolling_mask.sum(axis=-1)
return np.where(n > 0, rolling_a.sum(axis=-1).astype('float') / n, np.nan)
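# Hedged usage sketch (added for illustration; not part of the original
# script): a 3-point moving average (halfwindow=1) of a short ramp. Near the
# edges the effective window simply shrinks, as described in the docstring.
def _demo_moving_avg():
    a = np.array([0., 1., 2., 3., 4.])
    return moving_avg(a, halfwindow=1)   # [0.5, 1., 2., 3., 3.5]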
def butterworth(trace):
#filter
#print("first filter")
trace.filter(type="bandpass",
freqmin =freqmin,
freqmax = freqmax,
corners=corners,
zerophase=zerophase)
return trace
def normal(trace,
freqmin_earthquake,
freqmax_earthquake):
# normalization of the signal by the running mean
# in the earthquake frequency band
trcopy = trace
#print("normalising filter")
trcopy.filter(type="bandpass",
freqmin=freqmin_earthquake,
freqmax=freqmax_earthquake,
corners=corners,
zerophase=zerophase)
# decimating trace
resample(trcopy, period_resample)
# Time-normalization weights from smoothed abs(data)
# Note that trace's data can be a masked array
halfwindow = int(round(window_time * trcopy.stats.sampling_rate / 2))
mask = ~trcopy.data.mask if np.ma.isMA(trcopy.data) else None
tnorm_w = moving_avg(np.abs(trcopy.data),
halfwindow=halfwindow,
mask=mask)
if np.ma.isMA(trcopy.data):
# turning time-normalization weights into a masked array
s = "[warning: {}.{} trace's data is a masked array]"
print s.format(trace.stats.network, trace.stats.station),
tnorm_w = np.ma.masked_array(tnorm_w, trcopy.data.mask)
# time-normalization
trace.data /= tnorm_w
return trace
def whiten(trace, window_freq, freqmin, freqmax, corners, zerophase):
"""
    Spectrally whiten a trace: divide its spectrum by a smoothed version of
    its own amplitude spectrum, then band-pass filter the result.
"""
fft = rfft(trace.data) # real FFT
deltaf = trace.stats.sampling_rate / trace.stats.npts # frequency step
# smoothing amplitude spectrum
halfwindow = int(round(window_freq / deltaf / 2.0))
weight = moving_avg(abs(fft), halfwindow=halfwindow)
# normalizing spectrum and back to time domain
trace.data = irfft(fft / weight, n=len(trace.data))
# re bandpass to avoid low/high freq noise
#print("Whiten filter")
trace.filter(type="bandpass",
freqmin =freqmin,
freqmax = freqmax,
corners=corners,
zerophase=zerophase)
return trace
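# Illustrative note (added; not from the original script): whiten() flattens
# the amplitude spectrum by dividing the FFT by a smoothed copy of its own
# absolute value, then band-passes again. With the module-level settings
# defined further below this would be called as, e.g.:
#     tr = whiten(tr, window_freq=0.02, freqmin=1.0/25.0, freqmax=1.0,
#                 corners=1, zerophase=True)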
def preprocess(trace):
#trace.attach_response(inventories=xml_inventories)
trace = butterworth(trace)
#trace.remove_response(output="VEL", zero_mean=True)
#plt.figure()
#spectrum(trace)
#trace = normal(trace, freqmin_earthquake, freqmax_earthquake)
#plt.figure()
#spectrum(trace)
#print(trace.stats.sampling_rate)
trace = whiten(trace, window_freq, freqmin, freqmax, corners, zerophase)
#plt.figure()
#spectrum(trace)
return trace
xcorr = 0
freqmin = 1.0/25.0
freqmax = 1.0/1
corners = 1
zerophase = True
freqmin_earthquake = 1/50.0
freqmax_earthquake = 1/25.0
window_time = 0.5 * freqmax_earthquake
window_freq = 0.02
period_resample = 0.45
STATIONXML_DIR = '/storage/ANT/PROGRAMS/ANT_OUTPUT/INPUT/XML'
xml_inventories = []
sample_rate = 250
counts = 0
# NOTE: the original script never defined `times`, `dir1`, `dir2` or
# `XCORR_INTERVAL`; the assignments below are hypothetical placeholders so
# that the stacking loop is at least runnable.
dir1 = '/path/to/station1.msd'     # placeholder waveform path (assumed)
dir2 = '/path/to/station2.msd'     # placeholder waveform path (assumed)
XCORR_INTERVAL = 60.0              # assumed window length in minutes
times = []                         # fill with obspy UTCDateTime window starts
for time in times:
    st0 = read(dir1, starttime=time, endtime=time + dt.timedelta(minutes=XCORR_INTERVAL))
    st1 = read(dir2, starttime=time, endtime=time + dt.timedelta(minutes=XCORR_INTERVAL))
    tr0 = st0[0]
    tr1 = st1[0]
    tr0.stats.sampling_rate = sample_rate
    tr1.stats.sampling_rate = sample_rate
    tr0 = preprocess(tr0)
    tr1 = preprocess(tr1)
    xc = scipy.signal.correlate(tr0.data, tr1.data, mode='same')
    xcorr += xc  # stack onto the running total initialised above
    plt.figure(1)
    plt.plot(xcorr)
    plt.show()
    print(counts)
    counts += 1
import matplotlib.pyplot as plt
| gpl-3.0 |
Moriadry/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 62 | 5053 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
psychopy/versions | psychopy/data/utils.py | 1 | 23574 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
# from future import standard_library
# standard_library.install_aliases()
from builtins import str
from builtins import range
from past.builtins import basestring
import os
import re
import ast
import pickle
import time
import codecs
import numpy as np
import pandas as pd
from collections import OrderedDict
from pkg_resources import parse_version
from psychopy import logging, exceptions
from psychopy.constants import PY3
from psychopy.tools.filetools import pathToString
try:
import openpyxl
if parse_version(openpyxl.__version__) >= parse_version('2.4.0'):
# openpyxl moved get_column_letter to utils.cell
from openpyxl.utils.cell import get_column_letter
else:
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
haveOpenpyxl = True
except ImportError:
haveOpenpyxl = False
try:
import xlrd
haveXlrd = True
except ImportError:
haveXlrd = False
_nonalphanumeric_re = re.compile(r'\W') # will match all bad var name chars
def checkValidFilePath(filepath, makeValid=True):
"""Checks whether file path location (e.g. is a valid folder)
This should also check whether we have write-permissions to the folder
but doesn't currently do that!
added in: 1.90.00
"""
folder = os.path.split(os.path.abspath(filepath))[0]
if not os.path.isdir(folder):
os.makedirs(folder) # spit an error if we fail
return True
def isValidVariableName(name):
"""Checks whether a certain string could be used as a valid variable.
Usage::
OK, msg = isValidVariableName(name)
>>> isValidVariableName('name')
(True, '')
>>> isValidVariableName('0name')
(False, 'Variables cannot begin with numeric character')
>>> isValidVariableName('first second')
(False, 'Variables cannot contain punctuation or spaces')
>>> isValidVariableName('')
(False, "Variables cannot be missing, None, or ''")
>>> isValidVariableName(None)
(False, "Variables cannot be missing, None, or ''")
>>> isValidVariableName(23)
(False, "Variables must be string-like")
>>> isValidVariableName('a_b_c')
(True, '')
"""
if not name:
return False, "Variables cannot be missing, None, or ''"
if not isinstance(name, basestring):
return False, "Variables must be string-like"
try:
name = str(name) # convert from unicode if possible
except Exception:
if type(name) in [str, np.unicode_]:
msg = ("name %s (type %s) contains non-ASCII characters"
" (e.g. accents)")
raise AttributeError(msg % (name, type(name)))
else:
msg = "name %s (type %s) could not be converted to a string"
raise AttributeError(msg % (name, type(name)))
if name[0].isdigit():
return False, "Variables cannot begin with numeric character"
if _nonalphanumeric_re.search(name):
return False, "Variables cannot contain punctuation or spaces"
return True, ''
def _getExcelCellName(col, row):
"""Returns the excel cell name for a row and column (zero-indexed)
>>> _getExcelCellName(0,0)
'A1'
>>> _getExcelCellName(2,1)
'C2'
"""
# BEWARE - openpyxl uses indexing at 1, to fit with Excel
return "%s%i" % (get_column_letter(col + 1), row + 1)
def importTrialTypes(fileName, returnFieldNames=False):
"""importTrialTypes is DEPRECATED (as of v1.70.00)
Please use `importConditions` for identical functionality.
"""
logging.warning("importTrialTypes is DEPRECATED (as of v1.70.00). "
"Please use `importConditions` for identical "
"functionality.")
return importConditions(fileName, returnFieldNames)
def sliceFromString(sliceString):
"""Convert a text string into a valid slice object
which can be used as indices for a list or array.
>>> sliceFromString("0:10")
slice(0,10,None)
>>> sliceFromString("0::3")
slice(0,None,3)
>>> sliceFromString("-8:")
slice(-8,None,None)
"""
sliceArgs = []
for val in sliceString.split(':'):
if len(val) == 0:
sliceArgs.append(None)
else:
sliceArgs.append(int(round(float(val))))
# nb int(round(float(x))) is needed for x='4.3'
return slice(*sliceArgs)
def indicesFromString(indsString):
"""Convert a text string into a valid list of indices
"""
# "6"
try:
inds = int(round(float(indsString)))
return [inds]
except Exception:
pass
# "-6::2"
try:
inds = sliceFromString(indsString)
return inds
except Exception:
pass
# "1,4,8"
try:
inds = list(eval(indsString))
return inds
except Exception:
pass
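# Illustrative examples (added; not part of the original module):
#     indicesFromString("6")      -> [6]
#     indicesFromString("-6::2")  -> slice(-6, None, 2)
#     indicesFromString("1,4,8")  -> [1, 4, 8]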
def listFromString(val):
"""Take a string that looks like a list (with commas and/or [] and make
an actual python list"""
if type(val) == tuple:
return list(val)
elif type(val) == list:
return list(val) # nothing to do
elif type(val) != str:
raise ValueError("_strToList requires a string as its input not {}"
.format(repr(val)))
# try to evaluate with ast (works for "'yes,'no'" or "['yes', 'no']")
try:
iterable = ast.literal_eval(val)
if type(iterable) == tuple:
iterable = list(iterable)
return iterable
except (ValueError, SyntaxError):
pass # e.g. "yes, no" won't work. We'll go on and try another way
val = val.strip() # in case there are spaces
if val.startswith(('[', '(')) and val.endswith((']', ')')):
val = val[1:-1]
asList = val.split(",")
asList = [this.strip() for this in asList]
return asList
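# Illustrative examples (added; not part of the original module):
#     listFromString("['yes', 'no']")  -> ['yes', 'no']
#     listFromString("yes, no")        -> ['yes', 'no']
#     listFromString("(1, 2, 3)")      -> [1, 2, 3]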
def importConditions(fileName, returnFieldNames=False, selection=""):
"""Imports a list of conditions from an .xlsx, .csv, or .pkl file
The output is suitable as an input to :class:`TrialHandler`
`trialList` or to :class:`MultiStairHandler` as a `conditions` list.
If `fileName` ends with:
- .csv: import as a comma-separated-value file
(header + row x col)
- .xlsx: import as Excel 2007 (xlsx) files.
No support for older (.xls) is planned.
- .pkl: import from a pickle file as list of lists
(header + row x col)
The file should contain one row per type of trial needed and one column
for each parameter that defines the trial type. The first row should give
parameter names, which should:
- be unique
- begin with a letter (upper or lower case)
- contain no spaces or other punctuation (underscores are permitted)
`selection` is used to select a subset of condition indices to be used
It can be a list/array of indices, a python `slice` object or a string to
be parsed as either option.
e.g.:
- "1,2,4" or [1,2,4] or (1,2,4) are the same
- "2:5" # 2, 3, 4 (doesn't include last whole value)
- "-10:2:" # tenth from last to the last in steps of 2
- slice(-10, 2, None) # the same as above
- random(5) * 8 # five random vals 0-7
"""
def _attemptImport(fileName, sep=',', dec='.'):
"""Attempts to import file with specified settings and raises
ConditionsImportError if fails due to invalid format
:param filename: str
:param sep: str indicating the separator for cells (',', ';' etc)
        :param dec: str indicating the decimal point ('.', ',')
:return: trialList, fieldNames
"""
if fileName.endswith(('.csv', '.tsv')):
trialsArr = pd.read_csv(fileName, encoding='utf-8-sig',
sep=sep, decimal=dec)
logging.debug(u"Read csv file with pandas: {}".format(fileName))
elif fileName.endswith(('.xlsx', '.xls', '.xlsm')):
trialsArr = pd.read_excel(fileName)
logging.debug(u"Read Excel file with pandas: {}".format(fileName))
# then try to convert array to trialList and fieldnames
unnamed = trialsArr.columns.to_series().str.contains('^Unnamed: ')
trialsArr = trialsArr.loc[:, ~unnamed] # clear unnamed cols
logging.debug(u"Clearing unnamed columns from {}".format(fileName))
trialList, fieldNames = pandasToDictList(trialsArr)
return trialList, fieldNames
def _assertValidVarNames(fieldNames, fileName):
"""screens a list of names as candidate variable names. if all
names are OK, return silently; else raise with msg
"""
fileName = pathToString(fileName)
if not all(fieldNames):
msg = ('Conditions file %s: Missing parameter name(s); '
'empty cell(s) in the first row?')
raise exceptions.ConditionsImportError(msg % fileName)
for name in fieldNames:
OK, msg = isValidVariableName(name)
if not OK:
# tailor message to importConditions
msg = msg.replace('Variables', 'Parameters (column headers)')
raise exceptions.ConditionsImportError(
'Conditions file %s: %s%s"%s"' %
(fileName, msg, os.linesep * 2, name))
if fileName in ['None', 'none', None]:
if returnFieldNames:
return [], []
return []
if not os.path.isfile(fileName):
msg = 'Conditions file not found: %s'
raise ValueError(msg % os.path.abspath(fileName))
def pandasToDictList(dataframe):
"""Convert a pandas dataframe to a list of dicts.
This helper function is used by csv or excel imports via pandas
"""
# convert the resulting dataframe to a numpy recarray
trialsArr = dataframe.to_records(index=False)
# Check for new line characters in strings, and replace escaped characters
for record in trialsArr:
for idx, element in enumerate(record):
if isinstance(element, str):
record[idx] = element.replace('\\n', '\n')
if trialsArr.shape == ():
# convert 0-D to 1-D with one element:
trialsArr = trialsArr[np.newaxis]
fieldNames = list(trialsArr.dtype.names)
_assertValidVarNames(fieldNames, fileName)
# convert the record array into a list of dicts
trialList = []
for trialN, trialType in enumerate(trialsArr):
thisTrial = OrderedDict()
for fieldN, fieldName in enumerate(fieldNames):
val = trialsArr[trialN][fieldN]
if isinstance(val, basestring):
if val.startswith('[') and val.endswith(']'):
# val = eval('%s' %unicode(val.decode('utf8')))
val = eval(val)
elif type(val) == np.string_:
val = str(val.decode('utf-8-sig'))
# if it looks like a list, convert it:
if val.startswith('[') and val.endswith(']'):
# val = eval('%s' %unicode(val.decode('utf8')))
val = eval(val)
elif np.isnan(val):
val = None
thisTrial[fieldName] = val
trialList.append(thisTrial)
return trialList, fieldNames
if (fileName.endswith(('.csv', '.tsv'))
or (fileName.endswith(('.xlsx', '.xls', '.xlsm')) and haveXlrd)):
if fileName.endswith(('.csv', '.tsv', '.dlm')): # delimited text file
for sep, dec in [ (',', '.'), (';', ','), # most common in US, EU
('\t', '.'), ('\t', ','), (';', '.')]:
try:
trialList, fieldNames = _attemptImport(fileName=fileName,
sep=sep, dec=dec)
break # seems to have worked
except exceptions.ConditionsImportError as e:
continue # try a different format
else:
trialList, fieldNames = _attemptImport(fileName=fileName)
    elif fileName.endswith(('.xlsx','.xlsm')): # no xlrd so use openpyxl
if not haveOpenpyxl:
raise ImportError('openpyxl or xlrd is required for loading excel '
'files, but neither was found.')
# data_only was added in 1.8
if parse_version(openpyxl.__version__) < parse_version('1.8'):
wb = load_workbook(filename=fileName)
else:
wb = load_workbook(filename=fileName, data_only=True)
ws = wb.worksheets[0]
logging.debug(u"Read excel file with openpyxl: {}".format(fileName))
try:
# in new openpyxl (2.3.4+) get_highest_xx is deprecated
nCols = ws.max_column
nRows = ws.max_row
except Exception:
# version openpyxl 1.5.8 (in Standalone 1.80) needs this
nCols = ws.get_highest_column()
nRows = ws.get_highest_row()
# get parameter names from the first row header
fieldNames = []
for colN in range(nCols):
if parse_version(openpyxl.__version__) < parse_version('2.0'):
fieldName = ws.cell(_getExcelCellName(col=colN, row=0)).value
else:
# From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
fieldName = ws.cell(row=1, column=colN + 1).value
fieldNames.append(fieldName)
_assertValidVarNames(fieldNames, fileName)
# loop trialTypes
trialList = []
for rowN in range(1, nRows): # skip header first row
thisTrial = {}
for colN in range(nCols):
if parse_version(openpyxl.__version__) < parse_version('2.0'):
                    val = ws.cell(_getExcelCellName(col=colN, row=rowN)).value
else:
# From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
val = ws.cell(row=rowN + 1, column=colN + 1).value
# if it looks like a list or tuple, convert it
if (isinstance(val, basestring) and
(val.startswith('[') and val.endswith(']') or
val.startswith('(') and val.endswith(')'))):
val = eval(val)
fieldName = fieldNames[colN]
thisTrial[fieldName] = val
trialList.append(thisTrial)
elif fileName.endswith('.pkl'):
f = open(fileName, 'rb')
# Converting newline characters.
if PY3:
# 'b' is necessary in Python3 because byte object is
# returned when file is opened in binary mode.
buffer = f.read().replace(b'\r\n',b'\n').replace(b'\r',b'\n')
else:
buffer = f.read().replace('\r\n','\n').replace('\r','\n')
try:
trialsArr = pickle.loads(buffer)
except Exception:
raise IOError('Could not open %s as conditions' % fileName)
f.close()
trialList = []
if PY3:
            # In Python 3, string items loaded from the pickle may not be
            # plain str (e.g. numpy string scalars), so coerce them to str.
trialsArr = [[str(item) if isinstance(item, str) else item
for item in row] for row in trialsArr]
fieldNames = trialsArr[0] # header line first
_assertValidVarNames(fieldNames, fileName)
for row in trialsArr[1:]:
thisTrial = {}
for fieldN, fieldName in enumerate(fieldNames):
# type is correct, being .pkl
thisTrial[fieldName] = row[fieldN]
trialList.append(thisTrial)
else:
raise IOError('Your conditions file should be an '
'xlsx, csv, dlm, tsv or pkl file')
# if we have a selection then try to parse it
if isinstance(selection, basestring) and len(selection) > 0:
selection = indicesFromString(selection)
if not isinstance(selection, slice):
for n in selection:
try:
assert n == int(n)
except AssertionError:
raise TypeError("importConditions() was given some "
"`indices` but could not parse them")
# the selection might now be a slice or a series of indices
if isinstance(selection, slice):
trialList = trialList[selection]
elif len(selection) > 0:
allConds = trialList
trialList = []
        logging.debug(selection)
        logging.debug(len(allConds))
for ii in selection:
trialList.append(allConds[int(ii)])
logging.exp('Imported %s as conditions, %d conditions, %d params' %
(fileName, len(trialList), len(fieldNames)))
if returnFieldNames:
return (trialList, fieldNames)
else:
return trialList
def createFactorialTrialList(factors):
"""Create a trialList by entering a list of factors with names (keys)
and levels (values) it will return a trialList in which all factors
have been factorially combined (so for example if there are two factors
with 3 and 5 levels the trialList will be a list of 3*5 = 15, each
specifying the values for a given trial
Usage::
trialList = createFactorialTrialList(factors)
:Parameters:
factors : a dictionary with names (keys) and levels (values) of the
factors
Example::
factors={"text": ["red", "green", "blue"],
"letterColor": ["red", "green"],
"size": [0, 1]}
mytrials = createFactorialTrialList(factors)
"""
# the first step is to place all the factorial combinations in a list of
# lists
tempListOfLists = [[]]
for key in factors:
# this takes the levels of each factor as a set of values
# (a list) at a time
alist = factors[key]
tempList = []
for value in alist:
# now we loop over the values in a given list,
# and add each value of the other lists
for iterList in tempListOfLists:
tempList.append(iterList + [key, value])
tempListOfLists = tempList
# this second step is so we can return a list in the format of trialList
trialList = []
for atrial in tempListOfLists:
keys = atrial[0::2] # the even elements are keys
values = atrial[1::2] # the odd elements are values
atrialDict = {}
for i in range(len(keys)):
# this combines the key with the value
atrialDict[keys[i]] = values[i]
# append one trial at a time to the final trialList
trialList.append(atrialDict)
return trialList
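# Following the docstring example above, the fully crossed design gives
# 3 * 2 * 2 = 12 trials, each a dict with one level per factor
# (illustrative sketch only, not executed here):
#
#     >>> factors = {"text": ["red", "green", "blue"],
#     ...            "letterColor": ["red", "green"],
#     ...            "size": [0, 1]}
#     >>> len(createFactorialTrialList(factors))
#     12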
def bootStraps(dat, n=1):
"""Create a list of n bootstrapped resamples of the data
SLOW IMPLEMENTATION (Python for-loop)
Usage:
``out = bootStraps(dat, n=1)``
Where:
dat
an NxM or 1xN array (each row is a different condition, each
column is a different trial)
n
number of bootstrapped resamples to create
out
- dim[0]=conditions
- dim[1]=trials
- dim[2]=resamples
"""
dat = np.asarray(dat)
if len(dat.shape) == 1:
# have presumably been given a series of data for one stimulus
        # adds a dimension (array now has shape (1, Ntrials))
dat = np.array([dat])
nTrials = dat.shape[1]
# initialise a matrix to store output
resamples = np.zeros(dat.shape + (n,), dat.dtype)
rand = np.random.rand
for stimulusN in range(dat.shape[0]):
thisStim = dat[stimulusN, :] # fetch data for this stimulus
for sampleN in range(n):
indices = np.floor(nTrials * rand(nTrials)).astype('i')
resamples[stimulusN, :, sampleN] = np.take(thisStim, indices)
return resamples
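# Usage sketch for bootStraps; the random data below is purely illustrative:
#
#     >>> import numpy as np
#     >>> dat = np.random.rand(2, 20)         # 2 conditions x 20 trials
#     >>> resamples = bootStraps(dat, n=100)
#     >>> resamples.shape                     # (conditions, trials, resamples)
#     (2, 20, 100)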
def functionFromStaircase(intensities, responses, bins=10):
"""Create a psychometric function by binning data from a staircase
    procedure. Although the default is 10 bins, Jon now always uses 'unique'
    bins (fewer bins look pretty but lead to errors in slope estimation).
usage::
intensity, meanCorrect, n = functionFromStaircase(intensities,
responses, bins)
where:
intensities
are a list (or array) of intensities to be binned
responses
are a list of 0,1 each corresponding to the equivalent
intensity value
bins
can be an integer (giving that number of bins) or 'unique'
            (each bin is made from all data for exactly one intensity
value)
intensity
a numpy array of intensity values (where each is the center
of an intensity bin)
meanCorrect
a numpy array of mean % correct in each bin
n
a numpy array of number of responses contributing to each mean
"""
# convert to arrays
try:
# concatenate if multidimensional
intensities = np.concatenate(intensities)
responses = np.concatenate(responses)
except Exception:
intensities = np.array(intensities)
responses = np.array(responses)
# sort the responses
sort_ii = np.argsort(intensities)
sortedInten = np.take(intensities, sort_ii)
sortedResp = np.take(responses, sort_ii)
binnedResp = []
binnedInten = []
nPoints = []
if bins == 'unique':
intensities = np.round(intensities, decimals=8)
uniqueIntens = np.unique(intensities)
for thisInten in uniqueIntens:
theseResps = responses[intensities == thisInten]
binnedInten.append(thisInten)
binnedResp.append(np.mean(theseResps))
nPoints.append(len(theseResps))
else:
pointsPerBin = len(intensities)/bins
for binN in range(bins):
start = int(round(binN * pointsPerBin))
stop = int(round((binN + 1) * pointsPerBin))
thisResp = sortedResp[start:stop]
thisInten = sortedInten[start:stop]
binnedResp.append(np.mean(thisResp))
binnedInten.append(np.mean(thisInten))
nPoints.append(len(thisInten))
return binnedInten, binnedResp, nPoints
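# Usage sketch for functionFromStaircase with toy (made-up) staircase data:
#
#     >>> intensities = [0.1, 0.2, 0.2, 0.3, 0.3, 0.3]
#     >>> responses = [0, 0, 1, 1, 1, 1]
#     >>> inten, pCorrect, n = functionFromStaircase(intensities, responses,
#     ...                                            bins='unique')
#     >>> n   # number of trials contributing to each unique intensity
#     [1, 2, 3]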
def getDateStr(format="%Y_%b_%d_%H%M"):
"""Uses ``time.strftime()``_ to generate a string of the form
2012_Apr_19_1531 for 19th April 3.31pm, 2012.
This is often useful appended to data filenames to provide unique names.
    To include the year: getDateStr(format="%Y_%b_%d_%H%M") returns e.g.
    '2011_Mar_16_1307'. Depending on locale the string can have unicode
    chars in month names, so utf_8_decode them.
For date in the format of the current localization, do:
data.getDateStr(format=locale.nl_langinfo(locale.D_T_FMT))
"""
now = time.strftime(format, time.localtime())
if PY3:
return now
else:
try:
now_decoded = codecs.utf_8_decode(now)[0]
except UnicodeDecodeError:
# '2011_03_16_1307'
now_decoded = time.strftime("%Y_%m_%d_%H%M", time.localtime())
return now_decoded
| gpl-3.0 |
mne-tools/mne-python | mne/cov.py | 4 | 79191 | # Authors: Alexandre Gramfort <[email protected]>
# Matti Hämäläinen <[email protected]>
# Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
from distutils.version import LooseVersion
import itertools as itt
from math import log
import os
import numpy as np
from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_check_projs, _needs_eeg_average_ref_proj,
_has_eeg_average_ref_proj, _read_proj, _write_proj)
from .io import fiff_open, RawArray
from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info,
_picks_by_type, _pick_data_channels, _picks_to_idx,
_DATA_CH_TYPES_SPLIT)
from .io.constants import FIFF
from .io.meas_info import _read_bad_channels, create_info
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import Epochs
from .event import make_fixed_length_events
from .evoked import EvokedArray
from .rank import compute_rank
from .utils import (check_fname, logger, verbose, check_version, _time_mask,
warn, copy_function_doc_to_method_doc, _pl,
_undo_scaling_cov, _scaled_array, _validate_type,
_check_option, eigh, fill_doc, _on_missing,
_check_on_missing)
from . import viz
from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet,
empirical_covariance, log_likelihood)
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
raise ValueError('Both Covariance do not have the same list of '
'channels.')
projs1 = [str(c) for c in cov1['projs']]
projs2 = [str(c) for c in cov1['projs']]
if projs1 != projs2:
raise ValueError('Both Covariance do not have the same list of '
'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""Get the slice."""
mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq'])
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
@fill_doc
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
%(verbose_meth)s
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of str
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
dim : int
The number of channels ``n_channels``.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None, verbose=None):
"""Init of covariance."""
diag = (data.ndim == 1)
projs = _check_projs(projs)
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
self.verbose = verbose
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception:
fid.close()
os.remove(fname)
raise
end_file(fid)
def copy(self):
"""Copy the Covariance object.
Returns
-------
cov : instance of Covariance
The copied object.
"""
return deepcopy(self)
def as_diag(self):
"""Set covariance to be processed as being diagonal.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
This function operates in place.
"""
if self['diag']:
return self
self['diag'] = True
self['data'] = np.diag(self['data'])
self['eig'] = None
self['eigvec'] = None
return self
def _as_square(self):
# This is a hack but it works because np.diag() behaves nicely
if self['diag']:
self['diag'] = False
self.as_diag()
self['diag'] = False
return self
def _get_square(self):
if self['diag'] != (self.data.ndim == 1):
raise RuntimeError(
'Covariance attributes inconsistent, got data with '
'dimensionality %d but diag=%s'
% (self.data.ndim, self['diag']))
return np.diag(self.data) if self['diag'] else self.data.copy()
def __repr__(self): # noqa: D105
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cov.copy()
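        # pooled estimate: weight each covariance by its degrees of freedom,
        # i.e. (n1 * C1 + n2 * C2) / (n1 + n2), then accumulate nfree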
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
@copy_function_doc_to_method_doc(viz.misc.plot_cov)
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
return viz.misc.plot_cov(self, info, exclude, colorbar, proj, show_svd,
show, verbose)
@verbose
def plot_topomap(self, info, ch_type=None, vmin=None,
vmax=None, cmap=None, sensors=True, colorbar=True,
scalings=None, units=None, res=64,
size=1, cbar_fmt="%3.1f",
proj=False, show=True, show_names=False, title=None,
mask=None, mask_params=None, outlines='head',
contours=6, image_interp='bilinear',
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT,
noise_cov=None, verbose=None):
"""Plot a topomap of the covariance diagonal.
Parameters
----------
info : instance of Info
The measurement information.
%(topomap_ch_type)s
%(topomap_vmin_vmax)s
%(topomap_cmap)s
%(topomap_sensors)s
%(topomap_colorbar)s
%(topomap_scalings)s
%(topomap_units)s
%(topomap_res)s
%(topomap_size)s
%(topomap_cbar_fmt)s
%(plot_proj)s
%(show)s
%(topomap_show_names)s
%(title_None)s
%(topomap_mask)s
%(topomap_mask_params)s
%(topomap_outlines)s
%(topomap_contours)s
%(topomap_image_interp)s
%(topomap_axes)s
%(topomap_extrapolate)s
%(topomap_sphere_auto)s
%(topomap_border)s
noise_cov : instance of Covariance | None
If not None, whiten the instance with ``noise_cov`` before
plotting.
%(verbose)s
Returns
-------
fig : instance of Figure
The matplotlib figure.
Notes
-----
.. versionadded:: 0.21
"""
from .viz.misc import _index_info_cov
info, C, _, _ = _index_info_cov(info, self, exclude=())
evoked = EvokedArray(np.diag(C)[:, np.newaxis], info)
if noise_cov is not None:
# need to left and right multiply whitener, which for the diagonal
# entries is the same as multiplying twice
evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov)
if units is None:
units = 'AU'
if scalings is None:
scalings = 1.
if units is None:
units = {k: f'({v})²' for k, v in DEFAULTS['units'].items()}
if scalings is None:
scalings = {k: v * v for k, v in DEFAULTS['scalings'].items()}
return evoked.plot_topomap(
times=[0], ch_type=ch_type, vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, colorbar=colorbar, scalings=scalings,
units=units, res=res, size=size, cbar_fmt=cbar_fmt,
proj=proj, show=show, show_names=show_names, title=title,
mask=mask, mask_params=mask_params, outlines=outlines,
contours=contours, image_interp=image_interp, axes=axes,
extrapolate=extrapolate, sphere=sphere, border=border,
time_format='')
def pick_channels(self, ch_names, ordered=False):
"""Pick channels from this covariance matrix.
Parameters
----------
ch_names : list of str
List of channels to keep. All other channels are dropped.
ordered : bool
If True (default False), ensure that the order of the channels
matches the order of ``ch_names``.
Returns
-------
cov : instance of Covariance.
The modified covariance matrix.
Notes
-----
Operates in-place.
.. versionadded:: 0.20.0
"""
return pick_channels_cov(self, ch_names, exclude=[], ordered=ordered,
copy=False)
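# Illustrative sketch of Covariance arithmetic; ``cov1`` and ``cov2`` are
# assumed to be Covariance instances computed elsewhere (e.g. with
# compute_covariance below):
#
#     >>> cov_pooled = cov1 + cov2           # weighted by degrees of freedom
#     >>> cov_diag = cov1.copy().as_diag()   # keep only the diagonal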
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : str
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
%(verbose)s
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
with f as fid:
return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
limited=True))
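# Usage sketch for read_cov; the file name is hypothetical and must end in
# -cov.fif(.gz) or _cov.fif(.gz):
#
#     >>> cov = read_cov('sample_audvis-cov.fif')
#     >>> cov.save('sample_audvis-copy-cov.fif')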
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, std=None, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of Info
Measurement info.
std : dict of float | None
Standard_deviation of the diagonal elements. If dict, keys should be
``'grad'`` for gradiometers, ``'mag'`` for magnetometers and ``'eeg'``
for EEG channels. If None, default values will be used (see Notes).
%(verbose)s
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
The default noise values are 5 fT/cm, 20 fT, and 0.2 µV for gradiometers,
magnetometers, and EEG channels respectively.
.. versionadded:: 0.9.0
"""
picks = pick_types(info, meg=True, eeg=True, exclude=())
std = _handle_default('noise_std', std)
data = np.zeros(len(picks))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(std['grad'], std['mag'], std['eeg'])):
these_picks = pick_types(info, meg=meg, eeg=eeg)
data[np.searchsorted(picks, these_picks)] = val * val
ch_names = [info['ch_names'][pick] for pick in picks]
return Covariance(data, ch_names, info['bads'], info['projs'], nfree=0)
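# Usage sketch for make_ad_hoc_cov; ``raw`` is an assumed, already-loaded Raw
# instance and the std overrides simply restate the documented defaults:
#
#     >>> cov = make_ad_hoc_cov(raw.info)
#     >>> cov = make_ad_hoc_cov(raw.info,
#     ...                       std=dict(grad=5e-13, mag=20e-15, eeg=0.2e-6))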
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
warn('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
@verbose
def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None,
flat=None, picks=None, method='empirical',
method_params=None, cv=3, scalings=None, n_jobs=1,
return_estimators=False, reject_by_annotation=True,
rank=None, verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance from empty room
data or time intervals before starting the stimulation.
.. note:: To estimate the noise covariance from epoched data, use
:func:`mne.compute_covariance` instead.
Parameters
----------
raw : instance of Raw
Raw data.
tmin : float
Beginning of time interval in seconds. Defaults to 0.
tmax : float | None (default None)
End of time interval in seconds. If None (default), use the end of the
recording.
tstep : float (default 0.2)
Length of data chunks for artifact rejection in seconds.
Can also be None to use a single epoch of (tmax - tmin)
duration. This can use a lot of memory for large ``Raw``
instances.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
%(picks_good_data_noref)s
method : str | list | None (default 'empirical')
The method used for covariance estimation.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
method_params : dict | None (default None)
Additional parameters to the estimation procedure.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
.. versionadded:: 0.12
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
        to the same unit.
.. versionadded:: 0.12
%(n_jobs)s
.. versionadded:: 0.12
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
.. versionadded:: 0.12
%(reject_by_annotation_epochs)s
.. versionadded:: 0.14
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_covariance : Estimate noise covariance matrix from epoched data.
Notes
-----
This function will:
1. Partition the data into evenly spaced, equal-length epochs.
2. Load them into memory.
3. Subtract the mean across all time points and epochs for each channel.
4. Process the :class:`Epochs` by :func:`compute_covariance`.
This will produce a slightly different result compared to using
:func:`make_fixed_length_events`, :class:`Epochs`, and
:func:`compute_covariance` directly, since that would (with the recommended
baseline correction) subtract the mean across time *for each epoch*
(instead of across epochs) for each channel.
"""
tmin = 0. if tmin is None else float(tmin)
dt = 1. / raw.info['sfreq']
tmax = raw.times[-1] + dt if tmax is None else float(tmax)
tstep = tmax - tmin if tstep is None else float(tstep)
tstep_m1 = tstep - dt # inclusive!
events = make_fixed_length_events(raw, 1, tmin, tmax, tstep)
logger.info('Using up to %s segment%s' % (len(events), _pl(events)))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
# Need to include all channels e.g. if eog rejection is to be used
picks = np.arange(raw.info['nchan'])
pick_mask = np.in1d(
picks, _pick_data_channels(raw.info, with_ref_meg=False))
else:
pick_mask = slice(None)
picks = _picks_to_idx(raw.info, picks)
epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None,
picks=picks, reject=reject, flat=flat, verbose=False,
preload=False, proj=False,
reject_by_annotation=reject_by_annotation)
if method is None:
method = 'empirical'
if isinstance(method, str) and method == 'empirical':
# potentially *much* more memory efficient to do it the iterative way
picks = picks[pick_mask]
data = 0
n_samples = 0
mu = 0
# Read data in chunks
for raw_segment in epochs:
raw_segment = raw_segment[pick_mask]
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
_check_n_samples(n_samples, len(picks))
data -= mu[:, None] * (mu[None, :] / n_samples)
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
ch_names = [raw.info['ch_names'][k] for k in picks]
bads = [b for b in raw.info['bads'] if b in ch_names]
return Covariance(data, ch_names, bads, raw.info['projs'],
nfree=n_samples - 1)
del picks, pick_mask
# This makes it equivalent to what we used to do (and do above for
# empirical mode), treating all epochs as if they were a single long one
epochs.load_data()
ch_means = epochs._data.mean(axis=0).mean(axis=1)
epochs._data -= ch_means[np.newaxis, :, np.newaxis]
# fake this value so there are no complaints from compute_covariance
epochs.baseline = (None, None)
return compute_covariance(epochs, keep_sample_mean=True, method=method,
method_params=method_params, cv=cv,
scalings=scalings, n_jobs=n_jobs,
return_estimators=return_estimators,
rank=rank)
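# Usage sketch for compute_raw_covariance on an empty-room recording;
# ``raw_erm`` is an assumed, already-loaded Raw instance:
#
#     >>> cov = compute_raw_covariance(raw_erm, tmin=0, tmax=None,
#     ...                              method='empirical')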
def _check_method_params(method, method_params, keep_sample_mean=True,
name='method', allow_auto=True, rank=None):
"""Check that method and method_params are usable."""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'oas', 'shrunk', 'pca', 'factor_analysis', 'shrinkage')
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'oas': {'store_precision': False, 'assume_centered': True},
'shrinkage': {'shrinkage': 0.1, 'store_precision': False,
'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
for ch_type in _DATA_CH_TYPES_SPLIT:
_method_params['diagonal_fixed'][ch_type] = 0.1
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
shrinkage = method_params.get('shrinkage', {}).get('shrinkage', 0.1)
if not 0 <= shrinkage <= 1:
raise ValueError('shrinkage must be between 0 and 1, got %s'
% (shrinkage,))
was_auto = False
if method is None:
method = ['empirical']
elif method == 'auto' and allow_auto:
was_auto = True
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
if not all(k in accepted_methods for k in method):
raise ValueError(
'Invalid {name} ({method}). Accepted values (individually or '
'in a list) are any of "{accepted_methods}" or None.'.format(
name=name, method=method, accepted_methods=accepted_methods))
if not (isinstance(rank, str) and rank == 'full'):
if was_auto:
method.pop(method.index('factor_analysis'))
for method_ in method:
if method_ in ('pca', 'factor_analysis'):
raise ValueError('%s can so far only be used with rank="full",'
' got rank=%r' % (method_, rank))
if not keep_sample_mean:
if len(method) != 1 or 'empirical' not in method:
            raise ValueError('`keep_sample_mean=False` is only supported '
'with %s="empirical"' % (name,))
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
return method, _method_params
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
on_mismatch='raise', rank=None, verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stimulus periods
when the stimulus onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined:
1. either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
2. an Epochs object is created for multiple events and passed
to this function.
.. note:: To estimate the noise covariance from non-epoched raw data, such
as an empty-room recording, use
:func:`mne.compute_raw_covariance` instead.
Parameters
----------
epochs : instance of Epochs, or list of Epochs
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to
perform estimates using multiple methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in :footcite:`EngemannGramfort2015`. Valid methods are
'empirical', 'diagonal_fixed', 'shrunk', 'oas', 'ledoit_wolf',
'factor_analysis', 'shrinkage', and 'pca' (see Notes). If ``'auto'``,
it expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
``'factor_analysis'`` is removed when ``rank`` is not 'full'.
The ``'auto'`` mode is not recommended if there are many
segments of data, since computation can take a long time.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of ``method``.
If None (default), expands to the following (with the addition of
        ``{'store_precision': False, 'assume_centered': True}`` for all
        methods except ``'factor_analysis'`` and ``'pca'``)::
{'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...},
             'shrinkage': {'shrinkage': 0.1},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30)},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}}
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale data to roughly the same order of
magnitude.
%(n_jobs)s
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
on_mismatch : str
What to do when the MEG<->Head transformations do not match between
epochs. If "raise" (default) an error is raised, if "warn" then a
warning is emitted, if "ignore" then nothing is printed. Having
mismatched transforms can in some cases lead to unexpected or
unstable results in covariance calculation, e.g. when data
have been processed with Maxwell filtering but not transformed
to the same head position.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data, such as
empty-room recordings.
Notes
-----
Baseline correction or sufficient high-passing should be used
when creating the :class:`Epochs` to ensure that the data are zero mean,
otherwise the computed covariance matrix will be inaccurate.
Valid ``method`` strings are:
* ``'empirical'``
The empirical or sample covariance (default)
* ``'diagonal_fixed'``
A diagonal regularization based on channel types as in
:func:`mne.cov.regularize`.
* ``'shrinkage'``
Fixed shrinkage.
.. versionadded:: 0.16
* ``'ledoit_wolf'``
The Ledoit-Wolf estimator, which uses an
empirical formula for the optimal shrinkage value
:footcite:`LedoitWolf2004`.
* ``'oas'``
The OAS estimator :footcite:`ChenEtAl2010`, which uses a different
        empirical formula for the optimal shrinkage value.
.. versionadded:: 0.16
* ``'shrunk'``
Like 'ledoit_wolf', but with cross-validation
for optimal alpha.
* ``'pca'``
Probabilistic PCA with low rank :footcite:`TippingBishop1999`.
* ``'factor_analysis'``
Factor analysis with low rank :footcite:`Barber2012`.
``'ledoit_wolf'`` and ``'pca'`` are similar to ``'shrunk'`` and
``'factor_analysis'``, respectively, except that they use
cross validation (which is useful when samples are correlated, which
is often the case for M/EEG data). The former two are not included in
the ``'auto'`` mode to avoid redundancy.
For multiple event types, it is also possible to create a
single :class:`Epochs` object with events obtained using
:func:`mne.merge_events`. However, the resulting covariance matrix
will only be correct if ``keep_sample_mean is True``.
The covariance can be unstable if the number of samples is small.
In that case it is common to regularize the covariance estimate.
    The ``method`` parameter allows regularizing the covariance in an
    automated way. It also allows selecting between different alternative
    estimation algorithms which themselves achieve regularization.
Details are described in :footcite:`EngemannGramfort2015`.
For more information on the advanced estimation methods, see
:ref:`the sklearn manual <sklearn:covariance>`.
References
----------
.. footbibliography::
"""
# scale to natural unit for best stability with MEG/EEG
scalings = _check_scalings_user(scalings)
method, _method_params = _check_method_params(
method, method_params, keep_sample_mean, rank=rank)
del method_params
# for multi condition support epochs is required to refer to a list of
# epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
if any(epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and
keep_sample_mean for epochs_t in epochs):
warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
orig = epochs[0].info['dev_head_t']
_check_on_missing(on_mismatch, 'on_mismatch')
for ei, epoch in enumerate(epochs):
epoch.info._check_consistency()
if (orig is None) != (epoch.info['dev_head_t'] is None) or \
(orig is not None and not
np.allclose(orig['trans'],
epoch.info['dev_head_t']['trans'])):
msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n'
'and epochs[%s]:\n%s'
% (orig, ei, epoch.info['dev_head_t']))
_on_missing(on_mismatch, msg, 'on_mismatch')
bads = epochs[0].info['bads']
if projs is None:
projs = epochs[0].info['projs']
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
projs = _check_projs(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if not keep_sample_mean:
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = [0] * n_epoch_types
n_samples = np.zeros(n_epoch_types, dtype=np.int64)
n_epochs = np.zeros(n_epoch_types, dtype=np.int64)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data(picks=picks_meeg)[..., tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
cov_data = _compute_covariance_auto(
epochs, method=method, method_params=_method_params, info=info,
cv=cv, n_jobs=n_jobs, stop_early=True, picks_list=picks_list,
scalings=scalings, rank=rank)
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= (n_samples_tot - 1)
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot - 1)
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
logger.info('Number of samples used : %d' % n_samples_tot)
covs.sort(key=lambda c: c['loglik'], reverse=True)
if len(covs) > 1:
msg = ['log-likelihood on unseen data (descending order):']
for c in covs:
msg.append('%s: %0.3f' % (c['method'], c['loglik']))
logger.info('\n '.join(msg))
if return_estimators:
out = covs
else:
out = covs[0]
logger.info('selecting best estimator: {}'.format(out['method']))
else:
out = covs[0]
logger.info('[done]')
return out
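# Usage sketch for compute_covariance on the pre-stimulus baseline;
# ``epochs`` is an assumed, baseline-corrected Epochs instance:
#
#     >>> cov = compute_covariance(epochs, tmax=0.)
#     >>> covs = compute_covariance(epochs, tmax=0., method='auto',
#     ...                           return_estimators=True)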
def _check_scalings_user(scalings):
if isinstance(scalings, dict):
for k, v in scalings.items():
_check_option('the keys in `scalings`', k, ['mag', 'grad', 'eeg'])
elif scalings is not None and not isinstance(scalings, np.ndarray):
raise TypeError('scalings must be a dict, ndarray, or None, got %s'
% type(scalings))
scalings = _handle_default('scalings', scalings)
return scalings
def _eigvec_subspace(eig, eigvec, mask):
"""Compute the subspace from a subset of eigenvectors."""
# We do the same thing we do with projectors:
P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask])
eig, eigvec = eigh(P)
eigvec = eigvec.conj().T
return eig, eigvec
def _get_iid_kwargs():
import sklearn
kwargs = dict()
if LooseVersion(sklearn.__version__) < LooseVersion('0.22'):
kwargs['iid'] = False
return kwargs
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list, rank):
"""Compute covariance auto mode."""
# rescale to improve numerical stability
orig_rank = rank
rank = compute_rank(RawArray(data.T, info, copy=None, verbose=False),
rank, scalings, info)
with _scaled_array(data.T, picks_list, scalings):
C = np.dot(data.T, data)
_, eigvec, mask = _smart_eigh(C, info, rank, proj_subspace=True,
do_compute_rank=False)
eigvec = eigvec[mask]
data = np.dot(data, eigvec.T)
used = np.where(mask)[0]
sub_picks_list = [(key, np.searchsorted(used, picks))
for key, picks in picks_list]
sub_info = pick_info(info, used) if len(used) != len(mask) else info
logger.info('Reducing data rank from %s -> %s'
% (len(mask), eigvec.shape[0]))
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
ok_sklearn = check_version('sklearn')
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`, got %s' % (method,))
for method_ in method:
data_ = data.copy()
name = method_.__name__ if callable(method_) else method_
logger.info(msg % name.upper())
mp = method_params[method_]
_info = {}
if method_ == 'empirical':
est = EmpiricalCovariance(**mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'diagonal_fixed':
est = _RegCovariance(info=sub_info, **mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'ledoit_wolf':
from sklearn.covariance import LedoitWolf
shrinkages = []
lw = LedoitWolf(**mp)
for ch_type, picks in sub_picks_list:
lw.fit(data_[:, picks])
shrinkages.append((ch_type, lw.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del lw, sc
elif method_ == 'oas':
from sklearn.covariance import OAS
shrinkages = []
oas = OAS(**mp)
for ch_type, picks in sub_picks_list:
oas.fit(data_[:, picks])
shrinkages.append((ch_type, oas.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del oas, sc
elif method_ == 'shrinkage':
sc = _ShrunkCovariance(**mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del sc
elif method_ == 'shrunk':
from sklearn.model_selection import GridSearchCV
from sklearn.covariance import ShrunkCovariance
shrinkage = mp.pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**mp),
tuned_parameters, cv=cv, **_get_iid_kwargs())
for ch_type, picks in sub_picks_list:
gs.fit(data_[:, picks])
shrinkages.append((ch_type, gs.best_estimator_.shrinkage,
picks))
shrinkages = [c[0] for c in zip(shrinkages)]
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del shrinkage, sc
elif method_ == 'pca':
assert orig_rank == 'full'
pca, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
del pca
elif method_ == 'factor_analysis':
assert orig_rank == 'full'
fa, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
del fa
else:
raise ValueError('Oh no! Your estimator does not have'
' a .fit method')
logger.info('Done.')
if len(method) > 1:
logger.info('Using cross-validation to select the best estimator.')
out = dict()
for ei, (estimator, cov, runtime_info) in \
enumerate(estimator_cov_info):
if len(method) > 1:
loglik = _cross_val(data, estimator, cv, n_jobs)
else:
loglik = None
# project back
cov = np.dot(eigvec.T, np.dot(cov, eigvec))
# undo bias
cov *= data.shape[0] / (data.shape[0] - 1)
# undo scaling
_undo_scaling_cov(cov, picks_list, scalings)
method_ = method[ei]
name = method_.__name__ if callable(method_) else method_
out[name] = dict(loglik=loglik, data=cov, estimator=estimator)
out[name].update(runtime_info)
return out
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
    # score X under the Gaussian model implied by the fitted precision
precision = est.get_precision()
n_samples, n_features = X.shape
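    # per-sample Gaussian log-likelihood under a zero-mean model with
    # precision P: log p(x) = -0.5 * x' P x - 0.5 * (d * log(2 * pi)
    # - logdet(P)), averaged over samples below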
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
def _cross_val(data, est, cv, n_jobs):
"""Compute cross validation."""
from sklearn.model_selection import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""Compute latent variable models."""
method_params = deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
else:
assert mode == 'pca'
est = PCA
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0) and stop_early):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
    # happens if rank is too low right from the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
###############################################################################
# Sklearn Estimators
class _RegCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1,
ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1,
fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1,
csd=0.1, dbs=0.1, store_precision=False,
assume_centered=False):
self.info = info
# For sklearn compat, these cannot (easily?) be combined into
# a single dictionary
self.grad = grad
self.mag = mag
self.eeg = eeg
self.seeg = seeg
self.dbs = dbs
self.ecog = ecog
self.hbo = hbo
self.hbr = hbr
self.fnirs_cw_amplitude = fnirs_cw_amplitude
self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude
self.fnirs_fd_phase = fnirs_fd_phase
self.fnirs_od = fnirs_od
self.csd = csd
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
"""Fit covariance model with classical diagonal regularization."""
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
self.covariance_ = self.estimator_.fit(X).covariance_
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(
data=self.covariance_, names=self.info['ch_names'],
bads=self.info['bads'], projs=self.info['projs'],
nfree=len(self.covariance_))
cov_ = regularize(
cov_, self.info, proj=False, exclude='bads',
grad=self.grad, mag=self.mag, eeg=self.eeg,
ecog=self.ecog, seeg=self.seeg, dbs=self.dbs,
hbo=self.hbo, hbr=self.hbr, rank='full')
self.estimator_.covariance_ = self.covariance_ = cov_.data
return self
def score(self, X_test, y=None):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.score(X_test, y=y)
def get_precision(self):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
class _ShrunkCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, store_precision, assume_centered,
shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
"""Fit covariance model with oracle shrinkage regularization."""
from sklearn.covariance import shrunk_covariance
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
cov = self.estimator_.fit(X).covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
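        # sklearn's shrunk_covariance blends each block toward a scaled
        # identity: (1 - c) * cov + c * (trace(cov) / n_features) * Id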
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.estimator_.covariance_ = self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Delegate to modified EmpiricalCovariance instance."""
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.estimator_.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.estimator_.get_precision())
return res
def get_precision(self):
"""Delegate to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : str
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix.
See Also
--------
read_cov
"""
cov.save(fname)
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
"""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = eigh(A, overwrite_a=True)
eigvec = eigvec.conj().T
mask = np.ones(len(eig), bool)
eig[:-rank] = 0.0
mask[:-rank] = False
logger.info(' Setting small %s eigenvalues to zero (%s)'
% (ch_type, 'using PCA' if pca else 'without PCA'))
    if pca:  # PCA case.
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec, mask
@verbose
def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None,
scalings=None, on_rank_mismatch='ignore', verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : instance of Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list | None
The channel names to be considered. Can be None to use
``info['ch_names']``.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
cov : instance of Covariance
A copy of the covariance with the good channels subselected
and parameters updated.
"""
# reorder C and info to match ch_names order
noise_cov_idx = list()
missing = list()
ch_names = info['ch_names'] if ch_names is None else ch_names
for c in ch_names:
# this could be try/except ValueError, but it is not the preferred way
if c in noise_cov.ch_names:
noise_cov_idx.append(noise_cov.ch_names.index(c))
else:
missing.append(c)
if len(missing):
raise RuntimeError('Not all channels present in noise covariance:\n%s'
% missing)
C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)]
info = pick_info(info, pick_channels(info['ch_names'], ch_names))
projs = info['projs'] + noise_cov['projs']
noise_cov = Covariance(
data=C, names=ch_names, bads=list(noise_cov['bads']),
projs=deepcopy(noise_cov['projs']), nfree=noise_cov['nfree'],
method=noise_cov.get('method', None),
loglik=noise_cov.get('loglik', None))
eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs,
ch_names, on_rank_mismatch=on_rank_mismatch)
noise_cov.update(eig=eig, eigvec=eigvec)
return noise_cov
@verbose
def _smart_eigh(C, info, rank, scalings=None, projs=None,
ch_names=None, proj_subspace=False, do_compute_rank=True,
on_rank_mismatch='ignore', verbose=None):
"""Compute eigh of C taking into account rank and ch_type scalings."""
scalings = _handle_default('scalings_cov_rank', scalings)
projs = info['projs'] if projs is None else projs
ch_names = info['ch_names'] if ch_names is None else ch_names
if info['ch_names'] != ch_names:
info = pick_info(info, [info['ch_names'].index(c) for c in ch_names])
assert info['ch_names'] == ch_names
n_chan = len(ch_names)
# Create the projection operator
proj, ncomp, _ = make_projector(projs, ch_names)
if isinstance(C, Covariance):
C = C['data']
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
noise_cov = Covariance(C, ch_names, [], projs, 0)
if do_compute_rank: # if necessary
rank = compute_rank(
noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch)
assert C.ndim == 2 and C.shape[0] == C.shape[1]
# time saving short-circuit
if proj_subspace and sum(rank.values()) == C.shape[0]:
return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool)
dtype = complex if C.dtype == np.complex_ else float
eig = np.zeros(n_chan, dtype)
eigvec = np.zeros((n_chan, n_chan), dtype)
mask = np.zeros(n_chan, bool)
for ch_type, picks in _picks_by_type(info, meg_combined=True,
ref_meg=False, exclude='bads'):
if len(picks) == 0:
continue
this_C = C[np.ix_(picks, picks)]
if ch_type not in rank and ch_type in ('mag', 'grad'):
this_rank = rank['meg'] # if there is only one or the other
else:
this_rank = rank[ch_type]
e, ev, m = _get_ch_whitener(this_C, False, ch_type.upper(), this_rank)
if proj_subspace:
# Choose the subspace the same way we do for projections
e, ev = _eigvec_subspace(e, ev, m)
eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m
# XXX : also handle ref for sEEG and ECoG
if ch_type == 'eeg' and _needs_eeg_average_ref_proj(info) and not \
_has_eeg_average_ref_proj(projs):
warn('No average EEG reference present in info["projs"], '
'covariance may be adversely affected. Consider recomputing '
                 'covariance with an average EEG reference projector '
'added.')
return eig, eigvec, mask
@verbose
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1,
fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1,
fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1,
rank=None, scalings=None, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
.. note:: This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
:func:`mne.compute_covariance` to directly combine estimation
with regularization in a data-driven fashion. See the `faq
<http://mne.tools/dev/overview/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers. Must be the same as
``mag`` if data have been processed with SSS.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
        List of channels to mark as bad. If 'bads', bad channels
are extracted from both info['bads'] and cov['bads'].
proj : bool (default True)
Apply projections to keep rank of data.
seeg : float (default 0.1)
Regularization factor for sEEG signals.
ecog : float (default 0.1)
Regularization factor for ECoG signals.
hbo : float (default 0.1)
Regularization factor for HBO signals.
hbr : float (default 0.1)
Regularization factor for HBR signals.
fnirs_cw_amplitude : float (default 0.1)
Regularization factor for fNIRS CW raw signals.
fnirs_fd_ac_amplitude : float (default 0.1)
Regularization factor for fNIRS FD AC raw signals.
fnirs_fd_phase : float (default 0.1)
Regularization factor for fNIRS raw phase signals.
fnirs_od : float (default 0.1)
Regularization factor for fNIRS optical density signals.
csd : float (default 0.1)
Regularization factor for EEG-CSD signals.
dbs : float (default 0.1)
Regularization factor for DBS signals.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.17
%(verbose)s
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
mne.compute_covariance
""" # noqa: E501
from scipy import linalg
cov = cov.copy()
info._check_consistency()
scalings = _handle_default('scalings_cov_rank', scalings)
regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr,
fnirs_cw_amplitude=fnirs_cw_amplitude,
fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude,
fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd)
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT}
meg_combined = 'auto' if rank != 'full' else False
picks_dict.update(dict(_picks_by_type(
info, meg_combined=meg_combined, exclude=exclude, ref_meg=False)))
if len(picks_dict.get('meg', [])) > 0 and rank != 'full': # combined
if mag != grad:
raise ValueError('On data where magnetometers and gradiometers '
'are dependent (e.g., SSSed data), mag (%s) must '
'equal grad (%s)' % (mag, grad))
logger.info('Regularizing MEG channels jointly')
regs['meg'] = mag
else:
regs.update(mag=mag, grad=grad)
if rank != 'full':
rank = compute_rank(cov, rank, scalings, info)
info_ch_names = info['ch_names']
ch_names_by_type = dict()
for ch_type, picks_type in picks_dict.items():
ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
# Now get the indices for each channel type in the cov
idx_cov = {ch_type: [] for ch_type in ch_names_by_type}
for i, ch in enumerate(ch_names):
for ch_type in ch_names_by_type:
if ch in ch_names_by_type[ch_type]:
idx_cov[ch_type].append(i)
break
else:
raise Exception('channel %s is unknown type' % ch)
C = cov_good['data']
assert len(C) == sum(map(len, idx_cov.values()))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for ch_type in idx_cov:
desc = ch_type.upper()
idx = idx_cov[ch_type]
if len(idx) == 0:
continue
reg = regs[ch_type]
if reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
U = np.eye(this_C.shape[0])
this_ch_names = [ch_names[k] for k in idx]
if rank == 'full':
if proj:
P, ncomp, _ = make_projector(projs, this_ch_names)
if ncomp > 0:
# This adjustment ends up being redundant if rank is None:
U = linalg.svd(P)[0][:, :-ncomp]
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
else:
this_picks = pick_channels(info['ch_names'], this_ch_names)
this_info = pick_info(info, this_picks)
# Here we could use proj_subspace=True, but this should not matter
# since this is already in a loop over channel types
_, eigvec, mask = _smart_eigh(this_C, this_info, rank)
U = eigvec[mask].T
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
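# A minimal usage sketch (illustrative only, not part of the library docs):
# assuming ``epochs`` is an existing Epochs object, one might do
#
#     cov = compute_covariance(epochs, method='empirical')
#     cov_reg = regularize(cov, epochs.info, mag=0.05, grad=0.05, eeg=0.1)
#
# Each channel-type block of the covariance then gets ``reg * mean(diag)``
# added to its diagonal (see the ``reg * sigma`` line above).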
def _regularized_covariance(data, reg=None, method_params=None, info=None,
rank=None):
"""Compute a regularized covariance from data using sklearn.
This is a convenience wrapper for mne.decoding functions, which
adopted a slightly different covariance API.
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
_validate_type(reg, (str, 'numeric', None))
if reg is None:
reg = 'empirical'
elif not isinstance(reg, str):
reg = float(reg)
if method_params is not None:
raise ValueError('If reg is a float, method_params must be None '
'(got %s)' % (type(method_params),))
method_params = dict(shrinkage=dict(
shrinkage=reg, assume_centered=True, store_precision=False))
reg = 'shrinkage'
method, method_params = _check_method_params(
reg, method_params, name='reg', allow_auto=False, rank=rank)
# use mag instead of eeg here to avoid the cov EEG projection warning
info = create_info(data.shape[-2], 1000., 'mag') if info is None else info
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', None)
cov = _compute_covariance_auto(
data.T, method=method, method_params=method_params,
info=info, cv=None, n_jobs=1, stop_early=True,
picks_list=picks_list, scalings=scalings,
rank=rank)[reg]['data']
return cov
@verbose
def compute_whitener(noise_cov, info=None, picks=None, rank=None,
scalings=None, return_rank=False, pca=False,
return_colorer=False, on_rank_mismatch='warn',
verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict | None
The measurement info. Can be None if ``noise_cov`` has already been
prepared with :func:`prepare_noise_cov`.
%(picks_good_data_noref)s
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
return_rank : bool
If True, return the rank used to compute the whitener.
.. versionadded:: 0.15
pca : bool | str
Space to project the data into. Options:
:data:`python:True`
Whitener will be shape (n_nonzero, n_channels).
``'white'``
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and have the first ``n_channels - n_nonzero`` rows and
columns set to zero.
:data:`python:False` (default)
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and rotated back to the space of the original data.
.. versionadded:: 0.18
return_colorer : bool
If True, return the colorer as well.
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
W : ndarray, shape (n_channels, n_channels) or (n_nonzero, n_channels)
The whitening matrix.
ch_names : list
The channel names.
rank : int
Rank reduction of the whitener. Returned only if return_rank is True.
colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero)
The coloring matrix.
""" # noqa: E501
_validate_type(pca, (str, bool), 'space')
_valid_pcas = (True, 'white', False)
if pca not in _valid_pcas:
raise ValueError('space must be one of %s, got %s'
% (_valid_pcas, pca))
if info is None:
if 'eig' not in noise_cov:
raise ValueError('info can only be None if the noise cov has '
'already been prepared with prepare_noise_cov')
ch_names = deepcopy(noise_cov['names'])
else:
picks = _picks_to_idx(info, picks, with_ref_meg=False)
ch_names = [info['ch_names'][k] for k in picks]
del picks
noise_cov = prepare_noise_cov(
noise_cov, info, ch_names, rank, scalings,
on_rank_mismatch=on_rank_mismatch)
n_chan = len(ch_names)
assert n_chan == len(noise_cov['eig'])
# Omit the zeroes due to projection
eig = noise_cov['eig'].copy()
nzero = (eig > 0)
eig[~nzero] = 0. # get rid of numerical noise (negative) ones
if noise_cov['eigvec'].dtype.kind == 'c':
dtype = np.complex128
else:
dtype = np.float64
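    # The whitener is built as W = diag(1 / sqrt(eig)) @ eigvec, i.e. C ** -0.5
    # in the eigenbasis; rows for zero eigenvalues (directions removed by
    # projection) are left at zero so they contribute nothing.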
W = np.zeros((n_chan, 1), dtype)
W[nzero, 0] = 1.0 / np.sqrt(eig[nzero])
# Rows of eigvec are the eigenvectors
W = W * noise_cov['eigvec'] # C ** -0.5
C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5
n_nzero = nzero.sum()
logger.info(' Created the whitener using a noise covariance matrix '
'with rank %d (%d small eigenvalues omitted)'
% (n_nzero, noise_cov['dim'] - n_nzero))
# Do the requested projection
if pca is True:
W = W[nzero]
C = C[:, nzero]
elif pca is False:
W = np.dot(noise_cov['eigvec'].conj().T, W)
C = np.dot(C, noise_cov['eigvec'])
# Triage return
out = W, ch_names
if return_rank:
out += (n_nzero,)
if return_colorer:
out += (C,)
return out
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
evoked : instance of Evoked
The evoked data.
noise_cov : instance of Covariance
The noise covariance.
%(picks_good_data)s
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
%(verbose)s
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = evoked.copy()
picks = _picks_to_idx(evoked.info, picks)
if diag:
noise_cov = noise_cov.as_diag()
W, _ = compute_whitener(noise_cov, evoked.info, picks=picks,
rank=rank, scalings=scalings)
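    # The noise covariance describes single-epoch noise; averaging nave epochs
    # reduces the noise variance by 1 / nave, hence the sqrt(nave) factor below.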
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
from scipy import sparse
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diag = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
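                # Mirroring the lower triangle counts the diagonal twice, so it
                # is halved on the next line.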
data.flat[::dim + 1] /= 2.0
diag = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diag = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = _read_bad_channels(fid, this, None)
# Put it together
assert dim == len(data)
assert data.ndim == (1 if diag else 2)
cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
if limited:
del cov['kind'], cov['dim'], cov['diag']
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
| bsd-3-clause |
moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_04_Codes/reuters.py | 3 | 1368 | __author__ = 'gavin'
import glob
from scipy.sparse import vstack
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import LogisticRegression
# Create X and raw y
X = None
labels_for_all_instances = []
vectorizer = HashingVectorizer()
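# HashingVectorizer is stateless (no vocabulary is learned), so transforming
# one document at a time and stacking the rows is equivalent to a single batch
# transform.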
d = {fields[0]: fields[1:]
     for fields in [l.strip()[9:].split(' ')
                    for l in open('reuters/cats.txt', 'rb')
                    if l.startswith('training')]}
for f in glob.glob('/home/gavin/PycharmProjects/mastering-machine-learning/ch4-logistic_regression/reuters/training/*'):
text = ' '.join([label.strip() for label in open(f, 'rb')])
if X is None:
X = vectorizer.fit_transform([text])
else:
X = vstack((X, vectorizer.fit_transform([text])))
training_id = f[f.rfind('/')+1:]
labels_for_all_instances.append(d[training_id])
print X.shape
train_len = int(X.shape[0] * .7)
train_len = X.shape[0]-1
X_train = X.tocsc()[:train_len]
X_test = X.tocsc()[train_len:]
for label in set([label for instance in labels_for_all_instances for label in instance]):
y = [1 if label in instance else 0 for instance in labels_for_all_instances]
y_train = y[:train_len]
y_test = y[train_len:]
print len(y_test)
print X_test.shape
classifier = LogisticRegression()
    classifier.fit(X_train, y_train)
print 'Accuracy for %s: %s' % (label, classifier.score(X_test, y_test)) | gpl-3.0 |
hsuantien/scikit-learn | examples/svm/plot_rbf_parameters.py | 35 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
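For instance, ``np.logspace(-3, 3, 7)`` produces such a grid.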
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes, to make it
# a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
for gamma in gamma_2d_range:
clf = SVC(C=C, gamma=gamma)
clf.fit(X_2d, y_2d)
classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
# evaluate decision function in a grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# visualize decision function for these parameters
plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
size='medium')
# visualize parameter's effect on decision function
plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The score are encoded as colors with the hot colormap which varies from dark
# red to bright yellow. As the most interesting scores are all located in the
# 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
# as to make it easier to visualize the small variations of score values in the
# interesting range while not brutally collapsing all the low score values to
# the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
Ecotrust/madrona-priorities | util/test_scenarios.py | 3 | 6555 | from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(os.path.join('..','priorities',__file__)))
import settings
setup_environ(settings)
#==================================#
from seak.models import Scenario, ConservationFeature, PlanningUnit, Cost, PuVsCf, PuVsCost
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
import time
import random
def mean(alist):
floatNums = [float(x) for x in alist]
return sum(floatNums) / len(alist)
user, created = User.objects.get_or_create(username='mperry')
scalefactors = []
num_species = []
num_units = []
factors = [2, 3, 4, 5]
#factors = [float(x)/20.0 for x in range(20,100)]
numspecies = [2,3,4]
numcosts = [2,1,3]
targets = [0.5, 0.25]
penalties = [0.5, 0.25]
settings.MARXAN_NUMREPS = 3
#MODE = 'hardcoded'
#MODE = 'query'
MODE = 'create'
if MODE == 'query':
wp = Scenario.objects.filter(name__startswith="Test Scale Factor")
for w in wp:
print "Querying", w.name, w
scalefactors.append(w.input_scalefactor)
r = w.results
num_species.append(r['num_met'])
num_units.append(r['num_units'])
w.kml
COUNT = 0
def create_wp(target_dict, penalties_dict, costs_dict, geography_list, sf):
global COUNT
COUNT += 1
print target_dict
print penalties_dict
print costs_dict
print geography_list
print sf
with open(os.path.join(os.path.dirname(__file__), 'random_words.txt'),'r') as fh:
name = ' '.join([x.strip() for x in random.sample(fh.readlines(), 2)])
name += " - %s" % sf
wp = Scenario(input_targets = json.dumps(
target_dict
),
input_penalties = json.dumps(
penalties_dict
),
input_relativecosts=json.dumps(
costs_dict
),
input_geography=json.dumps(
geography_list
),
input_scalefactor=sf,
name= name, user=user)
return wp
if MODE == 'create':
wp = Scenario.objects.all() #filter(name__startswith="Auto Test Scale Factor")
wp.delete()
cfs = ConservationFeature.objects.all()
keys = []
for c in cfs:
a = c.level_string
while a.endswith('---'):
print a
a = a[:-3]
keys.append(a)
fh = open("/home/mperry/results.csv", 'w+')
fh.write('ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplanningunits')
fh.write('\n')
fh.flush()
g = GEOSGeometry('POINT(-13874668 %s)' % random.randrange(5005012, 8101549))
for f in factors:
for nc in numcosts:
for n in numspecies:
for i in range(1):
#if random.choice([True,False]):
geography_list = [x.fid for x in PlanningUnit.objects.filter(geometry__strictly_below=g)]
#else:
#geography_list = [x.fid for x in PlanningUnit.objects.filter(geometry__strictly_above=g)]
try:
n = int(n)
target_dict = {}
penalty_dict = {}
# pick n random species
selected_key = random.sample(keys, n) #'blah---blah'
if random.choice([True,False]):
t = random.choice(targets)
p = random.choice(penalties)
else:
t = None
p = None
for key in selected_key:
if t and p:
# Use the predetermined for ALL species
target_dict[key] = t
penalty_dict[key] = p
else:
# random for each species
target_dict[key] = random.choice(targets)
penalty_dict[key] = random.choice(penalties)
except ValueError:
# ALL species
t = random.choice(targets)
p = random.choice(penalties)
t2 = random.choice(targets)
p2 = random.choice(penalties)
target_dict = { "coordinate":t, "uids":t2 }
penalty_dict = { "coordinate":p, "uids":p2 }
costs_dict = {}
for a in random.sample([c.slug for c in Cost.objects.all()], nc):
costs_dict[a] = 1
sf = f
wp = create_wp(target_dict, penalty_dict, costs_dict, geography_list, sf)
print "####################################"
print 'targets', wp.input_targets
print 'penalties', wp.input_penalties
print 'costs', wp.input_relativecosts
wp.save()
#continue
while not wp.done:
time.sleep(2)
print " ", wp.status_html
inpenalties = json.loads(wp.input_penalties)
r = wp.results
#'ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplanningunits'
fh.write(','.join([str(x) for x in [
sum(json.loads(wp.input_relativecosts).values()),
len(inpenalties.values()),
sum(inpenalties.values()),
mean(inpenalties.values()),
wp.input_scalefactor,
r['num_met'],
r['num_units']
]]))
fh.write('\n')
fh.flush()
if MODE == 'hardcoded':
scalefactors = [0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.25, 1.5, 2, 4, 8, 16, 32]
num_units = [0, 3, 9, 17, 46, 57, 63, 73, 76, 79, 81, 82, 82, 83, 85, 90, 92, 93, 91]
num_species = [0, 1, 4, 10, 27, 38, 37, 54, 57, 58, 63, 59, 62, 66, 66, 69, 71, 71, 71]
#import matplotlib.pyplot as plt
#fig = plt.figure()
#plt.xlabel('Scale Factor')
#plt.ylabel('Number of Species Goals Met')
#ax = fig.add_subplot(111)
#ax.scatter(scalefactors, num_species)
#ax.set_xscale('log')
#plt.show()
| bsd-3-clause |
rueckstiess/dopamine | scripts/cart_bas_rbf.py | 1 | 1405 | from dopamine.environments import CartPoleEnvironment, CartPoleRenderer
from dopamine.agents import BASAgent, RBFEstimator, NNEstimator
from dopamine.experiments import Experiment
from dopamine.adapters import EpsilonGreedyExplorer, NormalizingAdapter, IndexingAdapter
from matplotlib import pyplot as plt
from numpy import *
# create agent, environment, renderer, experiment
agent = BASAgent(estimatorClass=NNEstimator)
environment = CartPoleEnvironment()
experiment = Experiment(environment, agent)
# cut off last two state dimensions
indexer = IndexingAdapter([0, 1], None)
experiment.addAdapter(indexer)
# add normalization adapter
normalizer = NormalizingAdapter(scaleActions=[(-50, 50)])
experiment.addAdapter(normalizer)
# # add e-greedy exploration
# explorer = EpsilonGreedyExplorer(0.4, episodeCount=500)
# experiment.addAdapter(explorer)
experiment.runEpisodes(10)
agent.forget()
# explorer.decay = 0.999
# renderer = CartPoleRenderer()
# environment.renderer = renderer
# renderer.start()
# run experiment
for i in range(100):
experiment.runEpisodes(5)
agent.learn()
# agent.forget()
valdata = experiment.evaluateEpisodes(10, visualize=True)
# print "exploration", explorer.epsilon
print "mean return", mean([sum(v.rewards) for v in valdata])
print "num episodes", len(agent.history)
# print "num total samples", agent.history.numTotalSamples()
| gpl-3.0 |
JazzeYoung/VeryDeepAutoEncoder | pylearn2/pylearn2/sandbox/cuda_convnet/bench.py | 44 | 3589 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
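    # cuda-convnet's FilterActs computes a correlation, whereas theano's conv2d
    # performs a true convolution, so the filters are flipped here to make the
    # two outputs comparable.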
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
| bsd-3-clause |
RomainSabathe/kaggle_airbnb2015 | Code/enhance_data.py | 1 | 5756 | import pandas as pd
import numpy as np
""" SETTINGS """
is_training_data = True
complete_enhancing = True
""" CODE """
""" Recovering the data """
if complete_enhancing:
type_of_data = 'base'
else:
type_of_data = 'enhanced'
if is_training_data:
dataname = 'learning_data'
else:
dataname = 'testing_data'
store = pd.HDFStore('../Data/%s_%s.h5' % (type_of_data, dataname))
data = store['data_users']
new_data = data
store.close()
store = pd.HDFStore('../Data/base_sessions.h5')
sessions = store['sessions']
store.close()
""" Dummifying """
to_dummify = ['gender',
'signup_method',
'signup_flow',
'language',
'affiliate_channel',
'affiliate_provider',
'first_affiliate_tracked',
'signup_app',
'first_device_type',
'first_browser',
]
new_data = pd.DataFrame(index=data.index)
for feature in to_dummify:
new_data = pd.concat([new_data, pd.get_dummies(data[feature],
prefix=feature)],
axis = 1)
""" Adding extra columns """
new_data = pd.concat([new_data, data[[col for col in data.columns.values if col not in to_dummify]]],
axis=1)
with open('../Data/%s_state_of_completing.txt' % dataname, 'w') as f:
f.write('Base dummy variables: ok.')
""" Correcting some of the variables """
to_reduce = ['date_account_created_year',
'timestamp_first_active_year',
'date_first_booking_year']
for col in to_reduce:
new_data[col] = new_data[col] - 2012
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nCentering years: ok.')
""" Integrating sessions data """
groups = sessions.groupby(sessions.index)
actions_count = groups.agg('count')['action']
new_data['actions_count'] = actions_count
new_data['actions_count'] = new_data['actions_count'].fillna(0)
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nCounting the number of actions per user: ok.')
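# For each categorical session field, build a user-by-value count table
# (groupby + unstack) and left-join those counts onto the user features.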
for col in ['action', 'action_type', 'action_detail', 'device_type']:
print col
groups = sessions.groupby([sessions.index, sessions[col]])
counts = groups.agg('count').iloc[:,0].unstack()
new_col_names = {old_name: '%s_%s_count' % (col, old_name) \
for old_name in counts.columns.values}
counts = counts.rename(columns=new_col_names)
counts = counts.fillna(0)
#new_data = pd.concat([new_data, counts], axis=1)
new_data = new_data.join(counts, how='left')
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write("\nCounting the number of '%s' per user: ok." % col)
new_data = new_data.fillna(0)
""" Drop data that is not accessible in the testing set. """
new_data = new_data.drop(['date_first_booking_year',
'date_first_booking_month',
'date_first_booking_day'], axis=1)
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nDrop date first booking: ok.')
""" Adding time component """
secs_elapsed = sessions['secs_elapsed'].replace({'undef':0})
secs_elapsed = secs_elapsed.fillna(0)
groups = secs_elapsed.groupby(sessions.index)
secs_elapsed = groups.agg(np.sum)
new_data['sum_secs_elapsed'] = secs_elapsed
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nAdded time component: ok.')
""" Even more dummy variables """
features = ['date_account_created_year',
'date_account_created_month', 'date_account_created_day',
'timestamp_first_active_year', 'timestamp_first_active_month',
'timestamp_first_active_day']
for feature in features:
new_data = pd.concat([new_data, pd.get_dummies(data[feature],
prefix=feature)],
axis = 1)
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nEven more dummy variables: ok.')
""" Some key dates """
condition = (new_data['date_account_created_month'] == 9) & \
(new_data['date_account_created_day'] >= 20)
new_data.loc[condition, 'end_of_september'] = 1
new_data.loc[~condition, 'end_of_september'] = 0
condition = (new_data['date_account_created_month'] == 12) & \
(new_data['date_account_created_day'] >= 17)
new_data.loc[condition, 'around_christmas'] = 1
new_data.loc[~condition, 'around_christmas'] = 0
condition = (new_data['date_account_created_month'] == 6) | \
(new_data['date_account_created_month'] == 7)
new_data.loc[condition, 'pre_summer'] = 1
new_data.loc[~condition, 'pre_summer'] = 0
condition = (new_data['date_account_created_day'] != \
new_data['timestamp_first_active_day'])
new_data.loc[condition, 'signin_on_different_day'] = 1
new_data.loc[~condition, 'signin_on_different_day'] = 0
with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
f.write('\nSomme binary variables regarding the calendar: ok.')
# ASSUMPTION HERE: we do NOT take care of missing values in 'secs_elapsed'
#sub_sessions = sessions[sessions['secs_elapsed'] != 'undef']
#groups = sub_sessions.groupby(sub_sessions.index)
#time_data = groups.agg(['mean', 'var'])
#new_data = pd.concat([new_data, time_data], axis=1)
#with open('../Data/%s_state_of_completing.txt' % dataname, 'a') as f:
# f.write('\nAdding mean and var waiting time per user: ok.')
""" Saving """
if is_training_data:
store = pd.HDFStore('../Data/enhanced_learning_restricted_data.h5')
else:
store = pd.HDFStore('../Data/enhanced_testing_data.h5')
store['data_users'] = new_data
store.close()
| mit |
fmfn/UnbalancedDataset | examples/over-sampling/plot_comparison_over_sampling.py | 2 | 10842 | """
==============================
Compare over-sampling samplers
==============================
The following example aims to make a qualitative comparison between the
different over-sampling algorithms available in the imbalanced-learn package.
"""
# Authors: Guillaume Lemaitre <[email protected]>
# License: MIT
# %%
print(__doc__)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("poster")
# %% [markdown]
# The following function will be used to create a toy dataset. It uses the
# :func:`~sklearn.datasets.make_classification` from scikit-learn but fixing
# some parameters.
# %%
from sklearn.datasets import make_classification
def create_dataset(
n_samples=1000,
weights=(0.01, 0.01, 0.98),
n_classes=3,
class_sep=0.8,
n_clusters=1,
):
return make_classification(
n_samples=n_samples,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=n_classes,
n_clusters_per_class=n_clusters,
weights=list(weights),
class_sep=class_sep,
random_state=0,
)
# %% [markdown]
# The following function will be used to plot the sample space after resampling
# to illustrate the specificities of an algorithm.
# %%
def plot_resampling(X, y, sampler, ax, title=None):
X_res, y_res = sampler.fit_resample(X, y)
ax.scatter(X_res[:, 0], X_res[:, 1], c=y_res, alpha=0.8, edgecolor="k")
if title is None:
title = f"Resampling with {sampler.__class__.__name__}"
ax.set_title(title)
sns.despine(ax=ax, offset=10)
# %% [markdown]
# The following function will be used to plot the decision function of a
# classifier given some data.
# %%
import numpy as np
def plot_decision_function(X, y, clf, ax, title=None):
plot_step = 0.02
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(
np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)
)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, alpha=0.4)
ax.scatter(X[:, 0], X[:, 1], alpha=0.8, c=y, edgecolor="k")
if title is not None:
ax.set_title(title)
# %% [markdown]
# Illustration of the influence of the balancing ratio
# ----------------------------------------------------
#
# We will first illustrate the influence of the balancing ratio on some toy
# data using a logistic regression classifier which is a linear model.
# %%
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
# %% [markdown]
# We will fit and show the decision boundary model to illustrate the impact of
# dealing with imbalanced classes.
# %%
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 12))
weights_arr = (
(0.01, 0.01, 0.98),
(0.01, 0.05, 0.94),
(0.2, 0.1, 0.7),
(0.33, 0.33, 0.33),
)
for ax, weights in zip(axs.ravel(), weights_arr):
X, y = create_dataset(n_samples=300, weights=weights)
clf.fit(X, y)
plot_decision_function(X, y, clf, ax, title=f"weight={weights}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# The greater the difference between the number of samples in each class, the
# poorer the classification results.
#
# Random over-sampling to balance the data set
# --------------------------------------------
#
# Random over-sampling can be used to repeat some samples and balance the
# number of samples between the classes. It can be seen that with this trivial
# approach the decision boundary is already less biased toward the majority
# class. The class :class:`~imblearn.over_sampling.RandomOverSampler`
# implements such a strategy.
# %%
from imblearn.pipeline import make_pipeline
from imblearn.over_sampling import RandomOverSampler
X, y = create_dataset(n_samples=100, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
clf.fit(X, y)
plot_decision_function(X, y, clf, axs[0], title="Without resampling")
sampler = RandomOverSampler(random_state=0)
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(X, y, model, axs[1], f"Using {model[0].__class__.__name__}")
fig.suptitle(f"Decision function of {clf.__class__.__name__}")
fig.tight_layout()
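# %%
# A minimal illustrative check (assuming the toy ``X``, ``y`` and ``sampler``
# defined above): the random over-sampler should equalise the class counts.
from collections import Counter
print(f"Before resampling: {sorted(Counter(y).items())}")
_, y_res_check = sampler.fit_resample(X, y)
print(f"After resampling:  {sorted(Counter(y_res_check).items())}")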
# %% [markdown]
# By default, random over-sampling generates a bootstrap. The parameter
# `shrinkage` allows adding a small perturbation to the generated data
# to generate a smoothed bootstrap instead. The plot below shows the difference
# between the two data generation strategies.
# %%
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(15, 7))
sampler.set_params(shrinkage=None)
plot_resampling(X, y, sampler, ax=axs[0], title="Normal bootstrap")
sampler.set_params(shrinkage=0.3)
plot_resampling(X, y, sampler, ax=axs[1], title="Smoothed bootstrap")
fig.suptitle(f"Resampling with {sampler.__class__.__name__}")
fig.tight_layout()
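# %%
# Another minimal check (reusing ``X`` and ``y`` from above): the smoothed
# bootstrap perturbs the duplicated points, so it yields many more distinct
# sample locations than the plain bootstrap.
X_plain, _ = RandomOverSampler(random_state=0, shrinkage=None).fit_resample(X, y)
X_smooth, _ = RandomOverSampler(random_state=0, shrinkage=0.3).fit_resample(X, y)
print(f"Distinct rows (normal bootstrap):   {len(np.unique(X_plain, axis=0))}")
print(f"Distinct rows (smoothed bootstrap): {len(np.unique(X_smooth, axis=0))}")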
# %% [markdown]
# It looks like more samples are generated with the smoothed bootstrap. This is
# due to the fact that the generated samples are not superimposed on the
# original samples.
#
# More advanced over-sampling using ADASYN and SMOTE
# --------------------------------------------------
#
# Instead of repeating the same samples when over-sampling, or perturbing the
# generated bootstrap samples, one can use some specific heuristics.
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE` can be used in this case.
# %%
from imblearn import FunctionSampler # to use a idendity sampler
from imblearn.over_sampling import SMOTE, ADASYN
X, y = create_dataset(n_samples=150, weights=(0.1, 0.2, 0.7))
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
samplers = [
FunctionSampler(),
RandomOverSampler(random_state=0),
SMOTE(random_state=0),
ADASYN(random_state=0),
]
for ax, sampler in zip(axs.ravel(), samplers):
title = "Original dataset" if isinstance(sampler, FunctionSampler) else None
plot_resampling(X, y, sampler, ax, title=title)
fig.tight_layout()
# %% [markdown]
# The following plot illustrates the difference between
# :class:`~imblearn.over_sampling.ADASYN` and
# :class:`~imblearn.over_sampling.SMOTE`.
# :class:`~imblearn.over_sampling.ADASYN` will focus on the samples which are
# difficult to classify with a nearest-neighbors rule while regular
# :class:`~imblearn.over_sampling.SMOTE` will not make any distinction.
# Therefore, the decision function differs depending on the algorithm.
X, y = create_dataset(n_samples=150, weights=(0.05, 0.25, 0.7))
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(20, 6))
models = {
"Without sampler": clf,
"ADASYN sampler": make_pipeline(ADASYN(random_state=0), clf),
"SMOTE sampler": make_pipeline(SMOTE(random_state=0), clf),
}
for ax, (title, model) in zip(axs, models.items()):
model.fit(X, y)
plot_decision_function(X, y, model, ax=ax, title=title)
fig.suptitle(f"Decision function using a {clf.__class__.__name__}")
fig.tight_layout()
# %% [markdown]
# Due to those sampling particularities, it can give rise to some specific
# issues as illustrated below.
# %%
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
samplers = [SMOTE(random_state=0), ADASYN(random_state=0)]
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 15))
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function with {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Particularities of over-sampling with SMOTE and ADASYN")
fig.tight_layout()
# %% [markdown]
# SMOTE proposes several variants by identifying specific samples to consider
# during the resampling. The borderline version
# (:class:`~imblearn.over_sampling.BorderlineSMOTE`) will detect which points to
# select, namely those on the border between two classes. The SVM version
# (:class:`~imblearn.over_sampling.SVMSMOTE`) will use the support vectors
# found by an SVM algorithm to create new samples, while the KMeans version
# (:class:`~imblearn.over_sampling.KMeansSMOTE`) will cluster the data before
# generating samples in each cluster independently, depending on each
# cluster's density.
# %%
from imblearn.over_sampling import BorderlineSMOTE, KMeansSMOTE, SVMSMOTE
X, y = create_dataset(n_samples=5000, weights=(0.01, 0.05, 0.94), class_sep=0.8)
fig, axs = plt.subplots(5, 2, figsize=(15, 30))
samplers = [
SMOTE(random_state=0),
BorderlineSMOTE(random_state=0, kind="borderline-1"),
BorderlineSMOTE(random_state=0, kind="borderline-2"),
KMeansSMOTE(random_state=0),
SVMSMOTE(random_state=0),
]
for ax, sampler in zip(axs, samplers):
model = make_pipeline(sampler, clf).fit(X, y)
plot_decision_function(
X, y, clf, ax[0], title=f"Decision function for {sampler.__class__.__name__}"
)
plot_resampling(X, y, sampler, ax[1])
fig.suptitle("Decision function and resampling using SMOTE variants")
fig.tight_layout()
# %% [markdown]
# When dealing with a mix of continuous and categorical features,
# :class:`~imblearn.over_sampling.SMOTENC` is the only method which can handle
# this case.
# %%
from collections import Counter
from imblearn.over_sampling import SMOTENC
rng = np.random.RandomState(42)
n_samples = 50
# Create a dataset of a mix of numerical and categorical data
X = np.empty((n_samples, 3), dtype=object)
X[:, 0] = rng.choice(["A", "B", "C"], size=n_samples).astype(object)
X[:, 1] = rng.randn(n_samples)
X[:, 2] = rng.randint(3, size=n_samples)
y = np.array([0] * 20 + [1] * 30)
print("The original imbalanced dataset")
print(sorted(Counter(y).items()))
print()
print("The first and last columns are containing categorical features:")
print(X[:5])
print()
smote_nc = SMOTENC(categorical_features=[0, 2], random_state=0)
X_resampled, y_resampled = smote_nc.fit_resample(X, y)
print("Dataset after resampling:")
print(sorted(Counter(y_resampled).items()))
print()
print("SMOTE-NC will generate categories for the categorical features:")
print(X_resampled[-5:])
print()
# %% [markdown]
# However, if the dataset is composed of only categorical features then one
# should use :class:`~imblearn.over_sampling.SMOTEN`.
# %%
from imblearn.over_sampling import SMOTEN
# Generate only categorical data
X = np.array(["A"] * 10 + ["B"] * 20 + ["C"] * 30, dtype=object).reshape(-1, 1)
y = np.array([0] * 20 + [1] * 40, dtype=np.int32)
print(f"Original class counts: {Counter(y)}")
print()
print(X[:5])
print()
sampler = SMOTEN(random_state=0)
X_res, y_res = sampler.fit_resample(X, y)
print(f"Class counts after resampling {Counter(y_res)}")
print()
print(X_res[-5:])
print()
| mit |
benjaminpope/pysco | kergain_sim_disk.py | 2 | 15139 | import numpy as np
import matplotlib.pyplot as plt
import pysco
from pysco.core import *
import fitsio
from time import time as clock
from old_diffract_tools import *
import pymultinest
from pysco.diffract_tools import shift_image_ft
from pysco.common_tasks import shift_image
from swiftmask import swiftpupil
import matplotlib as mpl
from astropy.table import Table
mpl.rcParams['figure.figsize']=(8.0,6.0) #(6.0,4.0)
mpl.rcParams['font.size']= 18 #10
mpl.rcParams['savefig.dpi']=200 #72
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
shift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
fftfreq = np.fft.fftfreq
fftn = np.fft.fftn
rfftn = np.fft.rfftn
dtor = np.pi/180.0
# =========================================================================
# =========================================================================
def print_time(t):
if t>3600:
print 'Time taken: %d h %d m %3f s'\
% (np.int(np.floor(t/3600)), np.int(np.floor(np.mod(t,3600)/60)),np.mod(t,60))
elif t>60:
print 'Time taken: %d m %3f s' % (np.int(np.floor(np.mod(t,3600)/60)),np.mod(t,60) )
else:
print 'Time taken: %3f s' % t
# =========================================================================
# =========================================================================
'''------------------------------------------------------------
kergain_sim.py
Automate a simulation comparing the effectiveness of raw-visibility
fitting with kernel-amplitude fitting
------------------------------------------------------------'''
pupil = 'plain'
try:
a = pysco.kpi('./geometry/'+pupil+'model.pick')
print 'Loaded kernel phase object'
except:
a = pysco.kpi('./geometry/'+pupil+'.txt')
a.name = 'Test'
a.save_to_file('./geometry/'+pupil+'model.pick')
nbuv, nbh = a.nbuv, a.nbh
try:
KerGain = np.loadtxt('KerGain_plain.csv')
print 'Loaded kernel amplitude matrix'
except:
gtfm = np.abs(a.TFM)
U, S, Vh = np.linalg.svd(gtfm.T, full_matrices=1)
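    # Rows of Vh whose singular values are (near) zero span the null space of
    # the transfer matrix; these rows form the kernel-amplitude projection.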
S1 = np.zeros(nbuv)
S1[0:nbh-1] = S
nkg = np.size(np.where(abs(S1) < 1e-3))
print nkg
KGCol = np.where(abs(S1) < 1e-3)[0]
KerGain = np.zeros((nkg, nbuv)) # allocate the array
for i in range(nkg):
KerGain[i,:] = (Vh)[KGCol[i],:]
np.savetxt('KerGain_plain.csv',KerGain)
print 'saved'
def make_ellipse(semi_axis,ecc,thick,sz=256,pscale=36.):
semi_axis, thick = semi_axis/pscale, thick/pscale
b = semi_axis*np.sqrt(1-ecc**2.)
bmin = (semi_axis-thick)*np.sqrt(1-ecc**2)
x = np.arange(sz)-sz/2.
xx, yy = np.meshgrid(x,x)
outer = (np.sqrt((xx/semi_axis)**2 + (yy/b)**2)< 1)
inner = (np.sqrt((xx/(semi_axis-thick))**2 + (yy/bmin)**2) >1)
plain = np.ones((sz,sz))
plain[~(outer*inner)] = 0
return plain/plain.sum()
from scipy.special import j1
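# Analytic visibility models used below: a circular Gaussian of FWHM ``d``
# (mas), a uniform disk of diameter ``d`` (2*J1(pi*d*r)/(pi*d*r)), and
# elliptical disks/annuli built by stretching the (u, v) plane along the
# ellipse axes before applying the uniform-disk kernel.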
def vis_gauss(d,u,v):
d = mas2rad(d)
return np.exp(-(np.pi*d*np.sqrt(u**2+v**2))**2/4./np.log(2))
def vis_ud(d,u,v):
r = np.sqrt(u**2+v**2)
t = 2*j1(np.pi*d*r)/(np.pi*d*r)
t[r <=(1/d*1e-5)] = 1.
return t
def vis_ellipse_disk(semi_axis,ecc,theta,u,v):
semi_axis = mas2rad(semi_axis)
thetad = np.pi*theta/180.
u1, v1 = u*np.cos(thetad)+v*np.sin(thetad), -u*np.sin(thetad)+v*np.cos(thetad)
ad, bd = semi_axis, semi_axis*np.sqrt(1-ecc**2.)
u1, v1 = u1*ad, v1*bd
return vis_ud(0.5,u1,v1)
def vis_ellipse_thin(semi_axis,ecc,theta,thick,u,v):
ad, bd = semi_axis, semi_axis*np.sqrt(1.-ecc**2.)
a2, b2 = semi_axis-thick, (semi_axis-thick)*np.sqrt(1.-ecc**2)
n1, n2 = ad*bd, a2*b2
return vis_ellipse_disk(semi_axis,ecc,theta,u,v)-n2/n1*vis_ellipse_disk(semi_axis-thick,ecc,theta,u,v)
def vis_ellipse_gauss(semi_axis,thick,gausswidth,ecc,theta,u,v):
    return vis_gauss(gausswidth,u,v)*vis_ellipse_thin(semi_axis,ecc,theta,thick,u,v)
def my_convolve_2d(array1,array2):
return shift(ifft(fft(shift(array1))*fft(shift(array2))))
def my_gauss_blur(array1,gausswidth):
gausswidth *= spaxel
s = np.shape(array1)[0]
x = np.arange(s)-s/2.
xx,yy = np.meshgrid(x,x)
rr = np.sqrt(xx**2 + yy**2)
gauss = np.exp(-(rr/gausswidth)**2)
return np.abs(my_convolve_2d(array1,gauss))
def mk_star_with_ring(psf_temp,ring,con):
dummy = np.abs(my_convolve_2d(ring,psf_temp))
dummy /= dummy.sum()
ff = psf_temp/psf_temp.sum()+dummy/con
return ff/ff.sum()
def make_disk(psf_temp,params,contrast):
dummy = make_ellipse(*params)
return mk_star_with_ring(psf_temp,dummy,contrast)
###-----------------------------------------
### now initialize a simulation
###-----------------------------------------
'''------------------------------
First, set all your parameters.
------------------------------'''
print '\nSimulating a basic PSF'
wavel = 2.5e-6
rprim = 5.093/2.#36903.e-3/2.
rsec= 1.829/2.
pos = [0,0] #m, deg
spaxel = 36.
piston = 0
nimages = 200
reso = rad2mas(wavel/(2*rprim))
print 'Minimum Lambda/D = %.3g mas' % reso
image, imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,seeing=None,verbose=False,\
show_pupil=False,mode=None)
# image = recenter(image,sg_rad=25)
imsz = image.shape[0]
images = np.zeros((nimages,imsz,imsz))
psfs = np.zeros((nimages,imsz,imsz))
'''----------------------------------------
Loop over a range of contrasts
----------------------------------------'''
contrast_list = np.linspace(10,4000.,39)
ncalcs = len(contrast_list)
ksemis, keccs, kthetas, kthicks, kcons = np.zeros(ncalcs), np.zeros(ncalcs),np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dksemis, dkeccs, dkthetas, dkthicks, dkcons = np.zeros(ncalcs), np.zeros(ncalcs),np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
vsemis, veccs, vthetas, vthicks, vcons = np.zeros(ncalcs), np.zeros(ncalcs),np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
dvsemis, dveccs, dvthetas, dvthicks, dvcons = np.zeros(ncalcs), np.zeros(ncalcs),np.zeros(ncalcs), np.zeros(ncalcs), np.zeros(ncalcs)
t0 = clock()
true_vals = (300.,0.95,100)
amp = 0.1
try:
dummy = fitsio.FITS('psf_cube_scint_%.2f_wavel_%.2f.fits' % (amp,wavel*1e6))
psfs = dummy[0][:,:,:]
print 'Loaded PSFs'
except:
print 'Creating PSFs'
for j in range(nimages):
psfs[j,:,:], imagex = diffract(wavel,rprim,rsec,pos,piston=piston,spaxel=spaxel,
                                       verbose=False,show_pupil=False,mode='amp',
perturbation=None,amp=amp)
fitsio.write('psf_cube_scint_%.2f_wavel_%.2f.fits' % (amp,wavel*1e6),psfs)
imsz = image.shape[0]
print_time(clock()-t0)
'''----------------------------------------
Initialise pysco with a pupil model
----------------------------------------'''
# meter to pixel conversion factor
scale = 1.0
m2pix = mas2rad(spaxel) * imsz/ wavel * scale
uv_samp = a.uv * m2pix + imsz/2 # uv sample coordinates in pixels
x = a.mask[:,0]
y = a.mask[:,1]
rev = 1
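# Sample the image's complex Fourier transform at the model (u, v) pixel
# coordinates to obtain reference visibilities for the unperturbed PSF.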
ac = shift(fft(shift(image)))
ac /= (np.abs(ac)).max() / a.nbh
uv_samp_rev=np.cast['int'](np.round(uv_samp))
uv_samp_rev[:,0]*=rev
data_cplx=ac[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2 = np.abs(data_cplx)
vis2 /= vis2.max() #normalise to the origin
'''----------------------------------------
Now loop over simulated disks
----------------------------------------'''
for trial, contrast in enumerate(contrast_list):
print '\nSimulating for contrast %f' % contrast
thistime = clock()
true_params = [true_vals[0]*4.,true_vals[1],0.,true_vals[2]/float(true_vals[0]),contrast]
for j in range(nimages):
images[j,:,:] = make_disk(psfs[j,:,:],true_vals,contrast)
imsz = images.shape[1]
'''----------------------------------------
Extract Visibilities
----------------------------------------'''
kervises = np.zeros((nimages,KerGain.shape[0]))
vis2s = np.zeros((nimages,vis2.shape[0]))
for j in range(nimages):
image2 = images[j,:,:]
ac2 = shift(fft(shift(image2)))
ac2 /= (np.abs(ac2)).max() / a.nbh
data_cplx2=ac2[uv_samp_rev[:,1], uv_samp_rev[:,0]]
vis2b = np.abs(data_cplx2)
vis2b /= vis2b.max() #normalise to the origin
vis2s[j,:]=vis2b
kervises[j,:] = np.dot(KerGain,vis2b/vis2-1.)
'''----------------------------------------
Now Model
----------------------------------------'''
paramlimits = [50.,10000.,0.,0.99,-90.,90.,0.02,0.49,contrast/4.,contrast*4.]
hdr = {'tel':'HST',
'filter':wavel,
'orient':0}
def vis_model(cube,kpi):
con = 1./cube[4]
u, v = (kpi.uv/wavel).T
unresolved = 1./(1.+con)
flux_ratio = con/(1.+con)
vises = vis_ellipse_thin(cube[0],cube[1],cube[2],cube[0]*cube[3],u,v)
norm = vis_ellipse_thin(cube[0],cube[1],cube[2],cube[0]*cube[3],np.array([1e-5]),np.array([1e-5]))
vises = (vises/norm *flux_ratio + unresolved)
return vises
### define prior and loglikelihood
def kg_loglikelihood(cube,kgd,kge,kpi):
'''Calculate chi2 for single band kernel phase data.
Used both in the MultiNest and MCMC Hammer implementations.'''
vises = vis_model(cube,kpi)
kergains = np.dot(KerGain,vises-1.)
chi2 = np.sum(((kgd-kergains)/kge)**2)
return -chi2/2.
def vis_loglikelihood(cube,vdata,ve,kpi):
'''Calculate chi2 for single band kernel phase data.
Used both in the MultiNest and MCMC Hammer implementations.'''
vises = vis_model(cube,kpi)**2.
chi2 = np.sum(((vdata-vises)/ve)**2)
return -chi2/2.
def myprior(cube, ndim, n_params,paramlimits=paramlimits):
cube[0] = (paramlimits[1] - paramlimits[0])*cube[0]+paramlimits[0]
cube[1] = (paramlimits[3] - paramlimits[2])*cube[1]+paramlimits[2]
cube[2] = (paramlimits[5] - paramlimits[4])*cube[2]+paramlimits[4]
cube[3] = (paramlimits[7] - paramlimits[6])*cube[3]+paramlimits[6]
cube[4] = (paramlimits[9] - paramlimits[8])*cube[4]+paramlimits[8]
'''-----------------------------------------------
First do kernel amplitudes
-----------------------------------------------'''
my_observable = np.mean(kervises,axis=0)
addederror = 0.000001 # in case there are bad frames
my_error = np.sqrt((np.std(kervises,axis=0))**2+addederror**2)
print 'Error:', my_error
def myloglike_kg(cube,ndim,n_params):
try:
loglike = kg_loglikelihood(cube,my_observable,my_error,a)
return loglike
except:
return -np.inf
parameters = ['Semi-major axis','Eccentricity','Position Angle', 'Thickness','Contrast']
n_params = len(parameters)
resume=False
eff=0.3
multi=True,
max_iter= 10000
ndim = n_params
pymultinest.run(myloglike_kg, myprior, n_params,wrapped_params=[2],
verbose=False,resume=False,max_iter=max_iter)
thing = pymultinest.Analyzer(n_params = n_params)
try:
s = thing.get_stats()
ksemis[trial], dksemis[trial] = s['marginals'][0]['median']/4., s['marginals'][0]['sigma']/4.
keccs[trial], dkeccs[trial] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
kthetas[trial], dkthetas[trial] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
kthicks[trial], dkthicks[trial] = s['marginals'][3]['median'], s['marginals'][3]['sigma']
kcons[trial], dkcons[trial] = s['marginals'][4]['median'], s['marginals'][4]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
print 'Best params (kg):', best_params
ksemis[trial] = best_params[0]/4.
keccs[trial] = best_params[1]
kthetas[trial] = best_params[2]
kthicks[trial] = best_params[3]
kcons[trial] = best_params[4]
model = np.dot(KerGain,vis_model(best_params,a)-1.)
true_model = np.dot(KerGain,vis_model(true_params,a)-1.)
plt.clf()
plt.errorbar(my_observable,true_model,xerr=my_error,color='b',alpha=0.5,
ls='',markersize=10,linewidth=2.5)
plt.errorbar(my_observable,model,xerr=my_error,color='k',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Kernel Amplitudes')
plt.ylabel('Model Kernel Amplitudes')
plt.title('Model Fit: Kernel Amplitudes, Contrast %.1f' % contrast)
plt.savefig('kpfit_%.1f_con.png' % contrast)
except:
print 'Failed!'
ksemis[trial], dksemis[trial] = 0,0
keccs[trial], dkeccs[trial] = 0,0
kthetas[trial], dkthetas[trial] = 0,0
kthicks[trial], dkthicks[trial] = 0,0
kcons[trial], dkcons[trial] = 0,0
print 'Kernel amplitudes done'
print_time(clock()-thistime)
print ''
'''-----------------------------------------------
Now do visibilities
-----------------------------------------------'''
my_observable = np.mean((vis2s/vis2)**2,axis=0)
print '\nDoing raw visibilities'
my_error = np.sqrt((np.std((vis2s/vis2)**2,axis=0))**2+addederror**2)
print 'Error:', my_error
def myloglike_vis(cube,ndim,n_params):
try:
loglike = vis_loglikelihood(cube,my_observable,my_error,a)
return loglike
except:
return -np.inf
thistime = clock()
pymultinest.run(myloglike_vis, myprior, n_params,wrapped_params=[2],
verbose=False,resume=False,max_iter=max_iter)
thing = pymultinest.Analyzer(n_params = n_params)
try:
s = thing.get_stats()
vsemis[trial], dvsemis[trial] = s['marginals'][0]['median']/4., s['marginals'][0]['sigma']/4.
veccs[trial], dveccs[trial] = s['marginals'][1]['median'], s['marginals'][1]['sigma']
vthetas[trial], dvthetas[trial] = s['marginals'][2]['median'], s['marginals'][2]['sigma']
vthicks[trial], dvthicks[trial] = s['marginals'][3]['median'], s['marginals'][3]['sigma']
vcons[trial], dvcons[trial] = s['marginals'][4]['median'], s['marginals'][4]['sigma']
stuff = thing.get_best_fit()
best_params = stuff['parameters']
print 'Best params (vis):', best_params
vsemis[trial] = best_params[0]/4.
veccs[trial] = best_params[1]
vthetas[trial] = best_params[2]
vthicks[trial] = best_params[3]
vcons[trial] = best_params[4]
model = vis_model(best_params,a)**2.
true_model = vis_model(true_params,a)**2.
plt.clf()
plt.errorbar(my_observable,true_model,xerr=my_error,color='b',alpha=0.5,
ls='',markersize=10,linewidth=2.5)
plt.errorbar(my_observable,model,xerr=my_error,color='k',
ls='',markersize=10,linewidth=2.5)
plt.xlabel('Measured Square Visibilities')
plt.ylabel('Model Square Visibilities')
plt.title('Model Fit: Visibilities, Contrast %.1f' % contrast)
plt.savefig('visfit_%.1f_con.png' % contrast)
except:
print 'Failed'
vsemis[trial], dvsemis[trial] = 0,0
veccs[trial], dveccs[trial] = 0,0
vthetas[trial], dvthetas[trial] = 0,0
vthicks[trial], dvthicks[trial] = 0,0
vcons[trial], dvcons[trial] = 0,0
print 'Visibilities done'
print_time(clock()-thistime)
'''------------------------------------
Now save!
------------------------------------'''
cmin, cmax = np.min(contrast_list), np.max(contrast_list)
vdata = Table({'Semis':vsemis,
'Eccs':veccs,
'Thetas':vthetas,
'Thicks':vthicks,
'Cons':vcons,
'Dsemis':dvsemis,
'Deccs':dveccs,
'Dthetas':dvthetas,
'Dthicks':dvthicks,
'Dcons':dvcons})
vdata.write('raw_vis_disk_sims_%.0f_%.0f.csv' % (cmin,cmax))
print 'Visibility fits saved to raw_vis_disk_sims_%.0f_%.0f.csv' % (cmin,cmax)
kdata = Table({'Semis':ksemis,
'Eccs':keccs,
'Thetas':kthetas,
'Thicks':kthicks,
'Cons':kcons,
'Dsemis':dksemis,
'Deccs':dkeccs,
'Dthetas':dkthetas,
'Dthicks':dkthicks,
'Dcons':dkcons})
kdata.write('kernel_amplitude_disk_sims_%.0f_%.0f.csv' % (cmin,cmax))
print 'Kernel amplitude fits saved to kernel_amplitude_disk_sims_%.0f_%.0f.csv' \
% (cmin,cmax)
print 'Finished contrast loop'
print_time(clock()-t0) | gpl-3.0 |
NunoEdgarGub1/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/image.py | 69 | 28764 | """
The image module supports basic image loading, rescaling and display
operations.
"""
from __future__ import division
import os, warnings
import numpy as np
from numpy import ma
from matplotlib import rcParams
from matplotlib import artist as martist
from matplotlib import colors as mcolors
from matplotlib import cm
# For clarity, names from _image are given explicitly in this module:
from matplotlib import _image
from matplotlib import _png
# For user convenience, the names from _image are also imported into
# the image namespace:
from matplotlib._image import *
class AxesImage(martist.Artist, cm.ScalarMappable):
zorder = 1
# map interpolation strings to module constants
_interpd = {
'nearest' : _image.NEAREST,
'bilinear' : _image.BILINEAR,
'bicubic' : _image.BICUBIC,
'spline16' : _image.SPLINE16,
'spline36' : _image.SPLINE36,
'hanning' : _image.HANNING,
'hamming' : _image.HAMMING,
'hermite' : _image.HERMITE,
'kaiser' : _image.KAISER,
'quadric' : _image.QUADRIC,
'catrom' : _image.CATROM,
'gaussian' : _image.GAUSSIAN,
'bessel' : _image.BESSEL,
'mitchell' : _image.MITCHELL,
'sinc' : _image.SINC,
'lanczos' : _image.LANCZOS,
'blackman' : _image.BLACKMAN,
}
# reverse interp dict
_interpdr = dict([ (v,k) for k,v in _interpd.items()])
interpnames = _interpd.keys()
def __str__(self):
return "AxesImage(%g,%g;%gx%g)" % tuple(self.axes.bbox.bounds)
def __init__(self, ax,
cmap = None,
norm = None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample = False,
**kwargs
):
"""
interpolation and cmap default to their rc settings
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
extent is data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self._extent = extent
self.set_filternorm(filternorm)
self.set_filterrad(filterrad)
self._filterrad = filterrad
self.set_interpolation(interpolation)
self.set_resample(resample)
self.axes = ax
self._imcache = None
self.update(kwargs)
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self._imcache = None
def changed(self):
"""
Call this whenever the mappable is changed so observers can
update state
"""
self._imcache = None
self._rgbacache = None
cm.ScalarMappable.changed(self)
def make_image(self, magnification=1.0):
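        """Build the resampled _image.Image for the current axes viewport,
        applying the configured interpolation, origin, background colour and
        the requested magnification."""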
if self._A is None:
raise RuntimeError('You must first set the image array or the image attribute')
xmin, xmax, ymin, ymax = self.get_extent()
dxintv = xmax-xmin
dyintv = ymax-ymin
# the viewport scale factor
sx = dxintv/self.axes.viewLim.width
sy = dyintv/self.axes.viewLim.height
numrows, numcols = self._A.shape[:2]
if sx > 2:
x0 = (self.axes.viewLim.x0-xmin)/dxintv * numcols
ix0 = max(0, int(x0 - self._filterrad))
x1 = (self.axes.viewLim.x1-xmin)/dxintv * numcols
ix1 = min(numcols, int(x1 + self._filterrad))
xslice = slice(ix0, ix1)
xmin_old = xmin
xmin = xmin_old + ix0*dxintv/numcols
xmax = xmin_old + ix1*dxintv/numcols
dxintv = xmax - xmin
sx = dxintv/self.axes.viewLim.width
else:
xslice = slice(0, numcols)
if sy > 2:
y0 = (self.axes.viewLim.y0-ymin)/dyintv * numrows
iy0 = max(0, int(y0 - self._filterrad))
y1 = (self.axes.viewLim.y1-ymin)/dyintv * numrows
iy1 = min(numrows, int(y1 + self._filterrad))
if self.origin == 'upper':
yslice = slice(numrows-iy1, numrows-iy0)
else:
yslice = slice(iy0, iy1)
ymin_old = ymin
ymin = ymin_old + iy0*dyintv/numrows
ymax = ymin_old + iy1*dyintv/numrows
dyintv = ymax - ymin
sy = dyintv/self.axes.viewLim.height
else:
yslice = slice(0, numrows)
if xslice != self._oldxslice or yslice != self._oldyslice:
self._imcache = None
self._oldxslice = xslice
self._oldyslice = yslice
if self._imcache is None:
if self._A.dtype == np.uint8 and len(self._A.shape) == 3:
im = _image.frombyte(self._A[yslice,xslice,:], 0)
im.is_grayscale = False
else:
if self._rgbacache is None:
x = self.to_rgba(self._A, self._alpha)
self._rgbacache = x
else:
x = self._rgbacache
im = _image.fromarray(x[yslice,xslice], 0)
if len(self._A.shape) == 2:
im.is_grayscale = self.cmap.is_gray()
else:
im.is_grayscale = False
self._imcache = im
if self.origin=='upper':
im.flipud_in()
else:
im = self._imcache
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg( *bg)
# image input dimensions
im.reset_matrix()
numrows, numcols = im.get_size()
im.set_interpolation(self._interpd[self._interpolation])
im.set_resample(self._resample)
# the viewport translation
tx = (xmin-self.axes.viewLim.x0)/dxintv * numcols
ty = (ymin-self.axes.viewLim.y0)/dyintv * numrows
l, b, r, t = self.axes.bbox.extents
widthDisplay = (round(r) + 0.5) - (round(l) - 0.5)
heightDisplay = (round(t) + 0.5) - (round(b) - 0.5)
widthDisplay *= magnification
heightDisplay *= magnification
im.apply_translation(tx, ty)
# resize viewport to display
rx = widthDisplay / numcols
ry = heightDisplay / numrows
im.apply_scaling(rx*sx, ry*sy)
im.resize(int(widthDisplay+0.5), int(heightDisplay+0.5),
norm=self._filternorm, radius=self._filterrad)
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
if (self.axes.get_xscale() != 'linear' or
self.axes.get_yscale() != 'linear'):
warnings.warn("Images are not supported on non-linear axes.")
im = self.make_image(renderer.get_image_magnification())
im._url = self.get_url()
l, b, widthDisplay, heightDisplay = self.axes.bbox.bounds
clippath, affine = self.get_transformed_clip_path_and_affine()
renderer.draw_image(round(l), round(b), im, self.axes.bbox.frozen(),
clippath, affine)
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
# be too difficult given that the image is rectilinear
x, y = mouseevent.xdata, mouseevent.ydata
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin,xmax = xmax,xmin
if ymin > ymax:
ymin,ymax = ymax,ymin
#print x, y, xmin, xmax, ymin, ymax
if x is not None and y is not None:
inside = x>=xmin and x<=xmax and y>=ymin and y<=ymax
else:
inside = False
return inside,{}
def write_png(self, fname, noscale=False):
"""Write the image to png file with fname"""
im = self.make_image()
if noscale:
numrows, numcols = im.get_size()
im.reset_matrix()
im.set_interpolation(0)
im.resize(numcols, numrows)
im.flipud_out()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def set_data(self, A, shape=None):
"""
Set the image array
ACCEPTS: numpy/PIL Image A"""
# check if data is PIL Image without importing Image
if hasattr(A,'getpixel'):
self._A = pil_to_array(A)
elif ma.isMA(A):
self._A = A
else:
self._A = np.asarray(A) # assume array
if self._A.dtype != np.uint8 and not np.can_cast(self._A.dtype, np.float):
raise TypeError("Image data can not convert to float")
if (self._A.ndim not in (2, 3) or
(self._A.ndim == 3 and self._A.shape[-1] not in (3, 4))):
raise TypeError("Invalid dimensions for image data")
self._imcache =None
self._rgbacache = None
self._oldxslice = None
self._oldyslice = None
def set_array(self, A):
"""
retained for backwards compatibility - use set_data instead
ACCEPTS: numpy array A or PIL Image"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked
# by mistake.
self.set_data(A)
def set_extent(self, extent):
"""extent is data axes (left, right, bottom, top) for making image plots
"""
self._extent = extent
xmin, xmax, ymin, ymax = extent
corners = (xmin, ymin), (xmax, ymax)
self.axes.update_datalim(corners)
if self.axes._autoscaleon:
self.axes.set_xlim((xmin, xmax))
self.axes.set_ylim((ymin, ymax))
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning',
'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
'sinc' | 'lanczos' | ]
"""
if s is None: s = rcParams['image.interpolation']
s = s.lower()
if s not in self._interpd:
raise ValueError('Illegal interpolation string')
self._interpolation = s
def set_resample(self, v):
if v is None: v = rcParams['image.resample']
self._resample = v
    def get_resample(self):
        return self._resample
def get_extent(self):
'get the image extent: left, right, bottom, top'
if self._extent is not None:
return self._extent
else:
sz = self.get_size()
#print 'sz', sz
numrows, numcols = sz
if self.origin == 'upper':
return (-0.5, numcols-0.5, numrows-0.5, -0.5)
else:
return (-0.5, numcols-0.5, -0.5, numrows-0.5)
def set_filternorm(self, filternorm):
"""Set whether the resize filter norms the weights -- see
help for imshow
ACCEPTS: 0 or 1
"""
if filternorm:
self._filternorm = 1
else:
self._filternorm = 0
def get_filternorm(self):
'return the filternorm setting'
return self._filternorm
def set_filterrad(self, filterrad):
"""Set the resize filter radius only applicable to some
interpolation schemes -- see help for imshow
ACCEPTS: positive float
"""
r = float(filterrad)
assert(r>0)
self._filterrad = r
def get_filterrad(self):
'return the filterrad setting'
return self._filterrad
class NonUniformImage(AxesImage):
def __init__(self, ax,
**kwargs
):
interp = kwargs.pop('interpolation', 'nearest')
AxesImage.__init__(self, ax,
**kwargs)
AxesImage.set_interpolation(self, interp)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x0, y0, v_width, v_height = self.axes.viewLim.bounds
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width *= magnification
height *= magnification
im = _image.pcolor(self._Ax, self._Ay, self._A,
height, width,
(x0, x0+v_width, y0, y0+v_height),
self._interpd[self._interpolation])
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
im.set_bg(*bg)
im.is_grayscale = self.is_grayscale
return im
def set_data(self, x, y, A):
x = np.asarray(x,np.float32)
y = np.asarray(y,np.float32)
if not ma.isMA(A):
A = np.asarray(A)
if len(x.shape) != 1 or len(y.shape) != 1\
or A.shape[0:2] != (y.shape[0], x.shape[0]):
raise TypeError("Axes don't match array shape")
if len(A.shape) not in [2, 3]:
raise TypeError("Can only plot 2D or 3D data")
if len(A.shape) == 3 and A.shape[2] not in [1, 3, 4]:
raise TypeError("3D arrays must have three (RGB) or four (RGBA) color components")
if len(A.shape) == 3 and A.shape[2] == 1:
A.shape = A.shape[0:2]
if len(A.shape) == 2:
if A.dtype != np.uint8:
A = (self.cmap(self.norm(A))*255).astype(np.uint8)
self.is_grayscale = self.cmap.is_gray()
else:
A = np.repeat(A[:,:,np.newaxis], 4, 2)
A[:,:,3] = 255
self.is_grayscale = True
else:
if A.dtype != np.uint8:
A = (255*A).astype(np.uint8)
if A.shape[2] == 3:
                B = np.zeros(tuple(list(A.shape[0:2]) + [4]), np.uint8)
B[:,:,0:3] = A
B[:,:,3] = 255
A = B
self.is_grayscale = False
self._A = A
self._Ax = x
self._Ay = y
self._imcache = None
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_interpolation(self, s):
if s != None and not s in ('nearest','bilinear'):
raise NotImplementedError('Only nearest neighbor and bilinear interpolations are supported')
AxesImage.set_interpolation(self, s)
def get_extent(self):
if self._A is None:
raise RuntimeError('Must set data first')
return self._Ax[0], self._Ax[-1], self._Ay[0], self._Ay[-1]
def set_filternorm(self, s):
pass
def set_filterrad(self, s):
pass
def set_norm(self, norm):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
cm.ScalarMappable.set_norm(self, norm)
def set_cmap(self, cmap):
if self._A is not None:
raise RuntimeError('Cannot change colors after loading data')
        cm.ScalarMappable.set_cmap(self, cmap)
class PcolorImage(martist.Artist, cm.ScalarMappable):
'''
Make a pcolor-style plot with an irregular rectangular grid.
This uses a variation of the original irregular image code,
and it is used by pcolorfast for the corresponding grid type.
'''
def __init__(self, ax,
x=None,
y=None,
A=None,
cmap = None,
norm = None,
**kwargs
):
"""
cmap defaults to its rc setting
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
Additional kwargs are matplotlib.artist properties
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
self.axes = ax
self._rgbacache = None
self.update(kwargs)
self.set_data(x, y, A)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
fc = self.axes.patch.get_facecolor()
bg = mcolors.colorConverter.to_rgba(fc, 0)
bg = (np.array(bg)*255).astype(np.uint8)
l, b, r, t = self.axes.bbox.extents
width = (round(r) + 0.5) - (round(l) - 0.5)
height = (round(t) + 0.5) - (round(b) - 0.5)
width = width * magnification
height = height * magnification
if self.check_update('array'):
A = self.to_rgba(self._A, alpha=self._alpha, bytes=True)
self._rgbacache = A
if self._A.ndim == 2:
self.is_grayscale = self.cmap.is_gray()
else:
A = self._rgbacache
vl = self.axes.viewLim
im = _image.pcolor2(self._Ax, self._Ay, A,
height,
width,
(vl.x0, vl.x1, vl.y0, vl.y1),
bg)
im.is_grayscale = self.is_grayscale
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.axes.bbox.xmin),
round(self.axes.bbox.ymin),
im,
self.axes.bbox.frozen(),
*self.get_transformed_clip_path_and_affine())
def set_data(self, x, y, A):
if not ma.isMA(A):
A = np.asarray(A)
if x is None:
x = np.arange(0, A.shape[1]+1, dtype=np.float64)
else:
x = np.asarray(x, np.float64).ravel()
if y is None:
y = np.arange(0, A.shape[0]+1, dtype=np.float64)
else:
y = np.asarray(y, np.float64).ravel()
if A.shape[:2] != (y.size-1, x.size-1):
print A.shape
print y.size
print x.size
raise ValueError("Axes don't match array shape")
if A.ndim not in [2, 3]:
raise ValueError("A must be 2D or 3D")
if A.ndim == 3 and A.shape[2] == 1:
A.shape = A.shape[:2]
self.is_grayscale = False
if A.ndim == 3:
if A.shape[2] in [3, 4]:
if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all():
self.is_grayscale = True
else:
raise ValueError("3D arrays must have RGB or RGBA as last dim")
self._A = A
self._Ax = x
self._Ay = y
self.update_dict['array'] = True
def set_array(self, *args):
raise NotImplementedError('Method not supported')
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends
ACCEPTS: float
"""
martist.Artist.set_alpha(self, alpha)
self.update_dict['array'] = True
class FigureImage(martist.Artist, cm.ScalarMappable):
zorder = 1
def __init__(self, fig,
cmap = None,
norm = None,
offsetx = 0,
offsety = 0,
origin=None,
**kwargs
):
"""
cmap is a colors.Colormap instance
norm is a colors.Normalize instance to map luminance to 0-1
kwargs are an optional list of Artist keyword args
"""
martist.Artist.__init__(self)
cm.ScalarMappable.__init__(self, norm, cmap)
if origin is None: origin = rcParams['image.origin']
self.origin = origin
self.figure = fig
self.ox = offsetx
self.oy = offsety
self.update(kwargs)
self.magnification = 1.0
def contains(self, mouseevent):
"""Test whether the mouse event occured within the image.
"""
if callable(self._contains): return self._contains(self,mouseevent)
xmin, xmax, ymin, ymax = self.get_extent()
xdata, ydata = mouseevent.x, mouseevent.y
#print xdata, ydata, xmin, xmax, ymin, ymax
if xdata is not None and ydata is not None:
inside = xdata>=xmin and xdata<=xmax and ydata>=ymin and ydata<=ymax
else:
inside = False
return inside,{}
def get_size(self):
'Get the numrows, numcols of the input image'
if self._A is None:
raise RuntimeError('You must first set the image array')
return self._A.shape[:2]
def get_extent(self):
'get the image extent: left, right, bottom, top'
numrows, numcols = self.get_size()
return (-0.5+self.ox, numcols-0.5+self.ox,
-0.5+self.oy, numrows-0.5+self.oy)
def make_image(self, magnification=1.0):
if self._A is None:
raise RuntimeError('You must first set the image array')
x = self.to_rgba(self._A, self._alpha)
self.magnification = magnification
# if magnification is not one, we need to resize
ismag = magnification!=1
#if ismag: raise RuntimeError
if ismag:
isoutput = 0
else:
isoutput = 1
im = _image.fromarray(x, isoutput)
fc = self.figure.get_facecolor()
im.set_bg( *mcolors.colorConverter.to_rgba(fc, 0) )
im.is_grayscale = (self.cmap.name == "gray" and
len(self._A.shape) == 2)
if ismag:
numrows, numcols = self.get_size()
numrows *= magnification
numcols *= magnification
im.set_interpolation(_image.NEAREST)
im.resize(numcols, numrows)
if self.origin=='upper':
im.flipud_out()
return im
def draw(self, renderer, *args, **kwargs):
if not self.get_visible(): return
# todo: we should be able to do some cacheing here
im = self.make_image(renderer.get_image_magnification())
renderer.draw_image(round(self.ox), round(self.oy), im, self.figure.bbox,
*self.get_transformed_clip_path_and_affine())
def write_png(self, fname):
"""Write the image to png file with fname"""
im = self.make_image()
rows, cols, buffer = im.as_rgba_str()
_png.write_png(buffer, cols, rows, fname)
def imread(fname):
"""
Return image file in *fname* as :class:`numpy.array`.
Return value is a :class:`numpy.array`. For grayscale images, the
return array is MxN. For RGB images, the return value is MxNx3.
For RGBA images the return value is MxNx4.
matplotlib can only read PNGs natively, but if `PIL
<http://www.pythonware.com/products/pil/>`_ is installed, it will
use it to load the image and return an array (if possible) which
can be used with :func:`~matplotlib.pyplot.imshow`.
TODO: support RGB and grayscale return values in _image.readpng
"""
def pilread():
'try to load the image with PIL or return None'
try: import Image
except ImportError: return None
image = Image.open( fname )
return pil_to_array(image)
handlers = {'png' :_png.read_png,
}
basename, ext = os.path.splitext(fname)
ext = ext.lower()[1:]
if ext not in handlers.keys():
im = pilread()
if im is None:
raise ValueError('Only know how to handle extensions: %s; with PIL installed matplotlib can handle more images' % handlers.keys())
return im
handler = handlers[ext]
return handler(fname)
def pil_to_array( pilImage ):
"""
load a PIL image and return it as a numpy array of uint8. For
grayscale images, the return array is MxN. For RGB images, the
return value is MxNx3. For RGBA images the return value is MxNx4
"""
def toarray(im):
'return a 1D array of floats'
x_str = im.tostring('raw',im.mode,0,-1)
x = np.fromstring(x_str,np.uint8)
return x
if pilImage.mode in ('RGBA', 'RGBX'):
im = pilImage # no need to convert images
elif pilImage.mode=='L':
        im = pilImage # no need to convert luminance images
# return MxN luminance array
x = toarray(im)
x.shape = im.size[1], im.size[0]
return x
elif pilImage.mode=='RGB':
#return MxNx3 RGB array
        im = pilImage # no need to convert RGB images
x = toarray(im)
x.shape = im.size[1], im.size[0], 3
return x
else: # try to convert to an rgba image
try:
im = pilImage.convert('RGBA')
except ValueError:
raise RuntimeError('Unknown image mode')
# return MxNx4 RGBA array
x = toarray(im)
x.shape = im.size[1], im.size[0], 4
return x
def thumbnail(infile, thumbfile, scale=0.1, interpolation='bilinear',
preview=False):
"""
make a thumbnail of image in *infile* with output filename
*thumbfile*.
*infile* the image file -- must be PNG or PIL readable if you
have `PIL <http://www.pythonware.com/products/pil/>`_ installed
*thumbfile*
the thumbnail filename
*scale*
the scale factor for the thumbnail
*interpolation*
the interpolation scheme used in the resampling
*preview*
if True, the default backend (presumably a user interface
backend) will be used which will cause a figure to be raised
if :func:`~matplotlib.pyplot.show` is called. If it is False,
a pure image backend will be used depending on the extension,
'png'->FigureCanvasAgg, 'pdf'->FigureCanvasPDF,
'svg'->FigureCanvasSVG
See examples/misc/image_thumbnail.py.
.. htmlonly::
:ref:`misc-image_thumbnail`
Return value is the figure instance containing the thumbnail
"""
basedir, basename = os.path.split(infile)
baseout, extout = os.path.splitext(thumbfile)
im = imread(infile)
rows, cols, depth = im.shape
# this doesn't really matter, it will cancel in the end, but we
# need it for the mpl API
dpi = 100
height = float(rows)/dpi*scale
width = float(cols)/dpi*scale
extension = extout.lower()
if preview:
# let the UI backend do everything
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(width, height), dpi=dpi)
else:
if extension=='.png':
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
elif extension=='.pdf':
from matplotlib.backends.backend_pdf import FigureCanvasPDF as FigureCanvas
elif extension=='.svg':
from matplotlib.backends.backend_svg import FigureCanvasSVG as FigureCanvas
else:
raise ValueError("Can only handle extensions 'png', 'svg' or 'pdf'")
from matplotlib.figure import Figure
fig = Figure(figsize=(width, height), dpi=dpi)
canvas = FigureCanvas(fig)
ax = fig.add_axes([0,0,1,1], aspect='auto', frameon=False, xticks=[], yticks=[])
basename, ext = os.path.splitext(basename)
ax.imshow(im, aspect='auto', resample=True, interpolation='bilinear')
fig.savefig(thumbfile, dpi=dpi)
return fig
| gpl-3.0 |
rgommers/statsmodels | statsmodels/sandbox/distributions/otherdist.py | 33 | 10145 | '''Parametric Mixture Distributions
Created on Sat Jun 04 2011
Author: Josef Perktold
Notes:
Compound Poisson has mass point at zero
http://en.wikipedia.org/wiki/Compound_Poisson_distribution
and would need special treatment
need a distribution that has discrete mass points and continuous range, e.g.
compound Poisson, Tweedie (for some parameter range),
pdf of Tobit model (?) - truncation with clipping
Question: Metaclasses and class factories for generating new distributions from
existing distributions by transformation, mixing, compounding
'''
from __future__ import print_function
import numpy as np
from scipy import stats
class ParametricMixtureD(object):
'''mixtures with a discrete distribution
The mixing distribution is a discrete distribution like scipy.stats.poisson.
All distribution in the mixture of the same type and parameterized
by the outcome of the mixing distribution and have to be a continuous
distribution (or have a pdf method).
As an example, a mixture of normal distributed random variables with
Poisson as the mixing distribution.
assumes vectorized shape, loc and scale as in scipy.stats.distributions
assume mixing_dist is frozen
initialization looks fragile for all possible cases of lower and upper
bounds of the distributions.
'''
def __init__(self, mixing_dist, base_dist, bd_args_func, bd_kwds_func,
cutoff=1e-3):
'''create a mixture distribution
Parameters
----------
mixing_dist : discrete frozen distribution
mixing distribution
base_dist : continuous distribution
parameterized distributions in the mixture
bd_args_func : callable
function that builds the tuple of args for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty tuple or
a tuple of arrays.
bd_kwds_func : callable
function that builds the dictionary of kwds for the base_dist.
The function obtains as argument the values in the support of
the mixing distribution and should return an empty dictionary or
a dictionary with arrays as values.
cutoff : float
If the mixing distribution has infinite support, then the
distribution is truncated with approximately (subject to integer
conversion) the cutoff probability in the missing tail. Random
draws that are outside the truncated range are clipped, that is
assigned to the highest or lowest value in the truncated support.
'''
self.mixing_dist = mixing_dist
self.base_dist = base_dist
#self.bd_args = bd_args
if not np.isneginf(mixing_dist.dist.a):
lower = mixing_dist.dist.a
else:
lower = mixing_dist.ppf(1e-4)
if not np.isposinf(mixing_dist.dist.b):
upper = mixing_dist.dist.b
else:
upper = mixing_dist.isf(1e-4)
self.ma = lower
self.mb = upper
mixing_support = np.arange(lower, upper+1)
self.mixing_probs = mixing_dist.pmf(mixing_support)
self.bd_args = bd_args_func(mixing_support)
self.bd_kwds = bd_kwds_func(mixing_support)
def rvs(self, size=1):
mrvs = self.mixing_dist.rvs(size)
        #TODO: check strange cases? this assumes continuous integers
mrvs_idx = (np.clip(mrvs, self.ma, self.mb) - self.ma).astype(int)
bd_args = tuple(md[mrvs_idx] for md in self.bd_args)
bd_kwds = dict((k, self.bd_kwds[k][mrvs_idx]) for k in self.bd_kwds)
kwds = {'size':size}
kwds.update(bd_kwds)
rvs = self.base_dist.rvs(*self.bd_args, **kwds)
return rvs, mrvs_idx
def pdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.pdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
def cdf(self, x):
x = np.asarray(x)
if np.size(x) > 1:
x = x[...,None] #[None, ...]
bd_probs = self.base_dist.cdf(x, *self.bd_args, **self.bd_kwds)
prob = (bd_probs * self.mixing_probs).sum(-1)
return prob, bd_probs
#try:
class ClippedContinuous(object):
'''clipped continuous distribution with a masspoint at clip_lower
Notes
-----
first version, to try out possible designs
insufficient checks for valid arguments and not clear
whether it works for distributions that have compact support
clip_lower is fixed and independent of the distribution parameters.
The clip_lower point in the pdf has to be interpreted as a mass point,
i.e. different treatment in integration and expect function, which means
none of the generic methods for this can be used.
maybe this will be better designed as a mixture between a degenerate or
discrete and a continuous distribution
Warning: uses equality to check for clip_lower values in function
arguments, since these are floating points, the comparison might fail
if clip_lower values are not exactly equal.
We could add a check whether the values are in a small neighborhood, but
it would be expensive (need to search and check all values).
'''
def __init__(self, base_dist, clip_lower):
self.base_dist = base_dist
self.clip_lower = clip_lower
def _get_clip_lower(self, kwds):
'''helper method to get clip_lower from kwds or attribute
'''
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
clip_lower = kwds.pop('clip_lower')
return clip_lower, kwds
def rvs(self, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
rvs_ = self.base_dist.rvs(*args, **kwds)
#same as numpy.clip ?
rvs_[rvs_ < clip_lower] = clip_lower
return rvs_
def pdf(self, x, *args, **kwds):
x = np.atleast_1d(x)
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
pdf_raw = np.atleast_1d(self.base_dist.pdf(x, *args, **kwds))
clip_mask = (x == self.clip_lower)
if np.any(clip_mask):
clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
pdf_raw[x < clip_lower] = 0
return pdf_raw
def cdf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
cdf_raw = self.base_dist.cdf(x, *args, **kwds)
#not needed if equality test is used
## clip_mask = (x == self.clip_lower)
## if np.any(clip_mask):
## clip_prob = self.base_dist.cdf(clip_lower, *args, **kwds)
## pdf_raw[clip_mask] = clip_prob
#the following will be handled by sub-classing rv_continuous
#if self.a is defined
cdf_raw[x < clip_lower] = 0
return cdf_raw
def sf(self, x, *args, **kwds):
if not 'clip_lower' in kwds:
clip_lower = self.clip_lower
else:
#allow clip_lower to be a possible parameter
clip_lower = kwds.pop('clip_lower')
sf_raw = self.base_dist.sf(x, *args, **kwds)
sf_raw[x <= clip_lower] = 1
return sf_raw
def ppf(self, x, *args, **kwds):
raise NotImplementedError
def plot(self, x, *args, **kwds):
clip_lower, kwds = self._get_clip_lower(kwds)
mass = self.pdf(clip_lower, *args, **kwds)
xr = np.concatenate(([clip_lower+1e-6], x[x>clip_lower]))
import matplotlib.pyplot as plt
#x = np.linspace(-4, 4, 21)
#plt.figure()
plt.xlim(clip_lower-0.1, x.max())
#remove duplicate calculation
xpdf = self.pdf(x, *args, **kwds)
plt.ylim(0, max(mass, xpdf.max())*1.1)
plt.plot(xr, self.pdf(xr, *args, **kwds))
#plt.vline(clip_lower, self.pdf(clip_lower, *args, **kwds))
plt.stem([clip_lower], [mass],
linefmt='b-', markerfmt='bo', basefmt='r-')
return
if __name__ == '__main__':
doplots = 1
#*********** Poisson-Normal Mixture
mdist = stats.poisson(2.)
bdist = stats.norm
bd_args_fn = lambda x: ()
#bd_kwds_fn = lambda x: {'loc': np.atleast_2d(10./(1+x))}
bd_kwds_fn = lambda x: {'loc': x, 'scale': 0.1*np.ones_like(x)} #10./(1+x)}
pd = ParametricMixtureD(mdist, bdist, bd_args_fn, bd_kwds_fn)
print(pd.pdf(1))
p, bp = pd.pdf(np.linspace(0,20,21))
pc, bpc = pd.cdf(np.linspace(0,20,21))
print(pd.rvs())
rvs, m = pd.rvs(size=1000)
if doplots:
import matplotlib.pyplot as plt
plt.hist(rvs, bins = 100)
plt.title('poisson mixture of normal distributions')
#********** clipped normal distribution (Tobit)
bdist = stats.norm
clip_lower_ = 0. #-0.5
cnorm = ClippedContinuous(bdist, clip_lower_)
x = np.linspace(1e-8, 4, 11)
print(cnorm.pdf(x))
print(cnorm.cdf(x))
if doplots:
#plt.figure()
#cnorm.plot(x)
plt.figure()
cnorm.plot(x = np.linspace(-1, 4, 51), loc=0.5, scale=np.sqrt(2))
plt.title('clipped normal distribution')
fig = plt.figure()
for i, loc in enumerate([0., 0.5, 1.,2.]):
fig.add_subplot(2,2,i+1)
cnorm.plot(x = np.linspace(-1, 4, 51), loc=loc, scale=np.sqrt(2))
plt.title('clipped normal, loc = %3.2f' % loc)
loc = 1.5
rvs = cnorm.rvs(loc=loc, size=2000)
plt.figure()
plt.hist(rvs, bins=50)
plt.title('clipped normal rvs, loc = %3.2f' % loc)
#plt.show()
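    #*********** quick sanity-check sketch: the Poisson weights on the truncated
    # support sum to ~1, so the mixture pdf evaluated on a generously wide grid
    # (bounds chosen here by hand) should integrate to approximately 1
    xg = np.linspace(-1., 21., 2201)
    mix_pdf, _ = pd.pdf(xg)
    print('mixture pdf integrates to %.4f' % np.trapz(mix_pdf, xg))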
| bsd-3-clause |
eramirem/astroML | doc/sphinxext/exfile.py | 3 | 4259 | """
ExecFile is a tool that opens a python script, extracts the file-level
docstring, executes the file, and saves the resulting matplotlib figures.
"""
from __future__ import print_function, division
import sys
import os
import traceback
import token
import tokenize
import gc
import matplotlib
matplotlib.use('Agg') #don't display plots
import pylab as plt
class ExecFile(object):
"""Execute the file and store the output, docstring, and
sequence of matplotlib figures
"""
def __init__(self, filename, execute=True, print_output=True):
self.filename = filename
self.extract_docstring()
self.figlist = []
self.output = ''
self.stdout = sys.stdout
self.print_output = print_output
if execute:
self.execute_file()
def save_figures(self, fmt):
from matplotlib import colors
for fig in self.figlist:
figfile = fmt % fig.number
print("saving", figfile)
# if black background, save with black background as well.
if colors.colorConverter.to_rgb(fig.get_facecolor()) == (0, 0, 0):
fig.savefig(figfile,
facecolor='k',
edgecolor='none')
                plt.close(fig)
else:
fig.savefig(figfile)
                plt.close(fig)
def write(self, s):
if self.print_output:
self.stdout.write(s)
self.output += s
def flush(self):
if self.print_output:
self.stdout.flush()
def extract_docstring(self):
""" Extract a module-level docstring
"""
lines = open(self.filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(lines.__iter__().next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(line.rstrip()
for line in docstring.split('\n')).split('\n\n')
if len(paragraphs) > 0:
first_par = paragraphs[0]
break
self.docstring = docstring
self.short_desc = first_par
self.end_line = erow + 1 + start_row
def execute_file(self):
"""Execute the file, catching standard output
and matplotlib figures
"""
dirname, fname = os.path.split(self.filename)
print('plotting %s' % fname)
# close any currently open figures
plt.close('all')
# change to file directory for execution
cwd = os.getcwd()
try:
if dirname:
os.chdir(dirname)
# set stdout to self in order to catch output (with write method)
sys.stdout = self
# execute the file
with open(os.path.basename(self.filename)) as f:
code = compile(f.read(), "somefile.py", 'exec')
exec(code, {'pl' : plt, 'plt' : plt, 'pylab' : plt})
fig_mgr_list = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
self.figlist = [manager.canvas.figure for manager in fig_mgr_list]
self.figlist = sorted(self.figlist,
key = lambda fig: fig.number)
except:
print(80 * '_')
            print('{0} is not compiling:'.format(fname))
traceback.print_exc()
print(80 * '_')
finally:
# change back to original directory, and reset sys.stdout
sys.stdout = self.stdout
os.chdir(cwd)
ncol = gc.collect()
if self.print_output and (ncol > 0):
print("\n > collected {0} unreachable objects".format(ncol))
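# minimal standalone usage sketch (the script name and output pattern below are
# hypothetical; this module is normally imported by the sphinx documentation build):
if __name__ == '__main__':
    ef = ExecFile('example_plot.py', print_output=False)
    print(ef.short_desc)
    ef.save_figures('example_plot_fig%d.png')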
| bsd-2-clause |
trungnt13/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Points that are neighboring often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forests learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
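# Each sample gets a one-hot encoding of the leaf it reaches in every tree, so
# X_transformed is a high-dimensional, sparse binary matrix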
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, m_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
georgid/SourceFilterContoursMelody | smstools/software/models_interface/hprModel_function.py | 1 | 3840 | # function to call the extractHarmSpec analysis/synthesis functions in software/models/hprModel.py
import numpy as np
import matplotlib.pyplot as plt
import os, sys
from scipy.signal import get_window
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
import hprModel as HPR
import stft as STFT
def extractHarmSpec(inputFile='/home/georgid/Documents/iKala/Wavfile/mono/31112_chorus.wav', window='blackman', M=601, N=1024, t=-100,
minSineDur=0.1, nH=100, minf0=350, maxf0=700, f0et=5, harmDevSlope=0.01):
"""
Perform analysis/synthesis using the harmonic plus residual model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
	M: analysis window size; N: fft size (power of two, greater than or equal to M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics; minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# find harmonics and residual
# HPR.hprModel(x, fs, w, N, t, nH, minf0, maxf0, f0et)
hfreq, hmag, hphase, xr = HPR.hprModelAnal(x, fs, w, N, H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)
# compute spectrogram of residual
mXr, pXr = STFT.stftAnal(xr, fs, w, N, H)
# synthesize hpr model
y, yh = HPR.hprModelSynth(hfreq, hmag, hphase, xr, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_sines.wav'
outputFileResidual = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel_residual.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_hprModel.wav'
	# write sound files for harmonics, residual, and the sum
UF.wavwrite(yh, fs, outputFileSines)
UF.wavwrite(xr, fs, outputFileResidual)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 5000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
# plot the magnitude spectrogram of residual
plt.subplot(3,1,2)
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mXr[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))
plt.autoscale(tight=True)
# plot harmonic frequencies on residual spectrogram
if (hfreq.shape[1] > 0):
harms = hfreq*np.less(hfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time(s)')
plt.ylabel('frequency(Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + residual spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
extractHarmSpec()
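	# illustrative alternative call (the path and f0 range below are hypothetical;
	# the remaining parameters follow the docstring above):
	# extractHarmSpec(inputFile='/path/to/voice.wav', minf0=120, maxf0=400)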
| gpl-3.0 |
miloharper/neural-network-animation | matplotlib/tests/test_delaunay.py | 14 | 7090 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison, knownfailureif
from matplotlib.cbook import MatplotlibDeprecationWarning
with warnings.catch_warnings():
# the module is deprecated. The tests should be removed when the module is.
warnings.simplefilter('ignore', MatplotlibDeprecationWarning)
from matplotlib.delaunay.triangulate import Triangulation
from matplotlib import pyplot as plt
import matplotlib as mpl
def constant(x, y):
return np.ones(x.shape, x.dtype)
constant.title = 'Constant'
def xramp(x, y):
return x
xramp.title = 'X Ramp'
def yramp(x, y):
return y
yramp.title = 'Y Ramp'
def exponential(x, y):
x = x*9
y = y*9
x1 = x+1.0
x2 = x-2.0
x4 = x-4.0
x7 = x-7.0
y1 = x+1.0
y2 = y-2.0
y3 = y-3.0
y7 = y-7.0
f = (0.75 * np.exp(-(x2*x2+y2*y2)/4.0) +
0.75 * np.exp(-x1*x1/49.0 - y1/10.0) +
0.5 * np.exp(-(x7*x7 + y3*y3)/4.0) -
0.2 * np.exp(-x4*x4 -y7*y7))
return f
exponential.title = 'Exponential and Some Gaussians'
def cliff(x, y):
f = np.tanh(9.0*(y-x) + 1.0)/9.0
return f
cliff.title = 'Cliff'
def saddle(x, y):
f = (1.25 + np.cos(5.4*y))/(6.0 + 6.0*(3*x-1.0)**2)
return f
saddle.title = 'Saddle'
def gentle(x, y):
f = np.exp(-5.0625*((x-0.5)**2+(y-0.5)**2))/3.0
return f
gentle.title = 'Gentle Peak'
def steep(x, y):
f = np.exp(-20.25*((x-0.5)**2+(y-0.5)**2))/3.0
return f
steep.title = 'Steep Peak'
def sphere(x, y):
circle = 64-81*((x-0.5)**2 + (y-0.5)**2)
f = np.where(circle >= 0, np.sqrt(np.clip(circle,0,100)) - 0.5, 0.0)
return f
sphere.title = 'Sphere'
def trig(x, y):
f = 2.0*np.cos(10.0*x)*np.sin(10.0*y) + np.sin(10.0*x*y)
return f
trig.title = 'Cosines and Sines'
def gauss(x, y):
x = 5.0-10.0*x
y = 5.0-10.0*y
g1 = np.exp(-x*x/2)
g2 = np.exp(-y*y/2)
f = g1 + 0.75*g2*(1 + g1)
return f
gauss.title = 'Gaussian Peak and Gaussian Ridges'
def cloverleaf(x, y):
ex = np.exp((10.0-20.0*x)/3.0)
ey = np.exp((10.0-20.0*y)/3.0)
logitx = 1.0/(1.0+ex)
logity = 1.0/(1.0+ey)
f = (((20.0/3.0)**3 * ex*ey)**2 * (logitx*logity)**5 *
(ex-2.0*logitx)*(ey-2.0*logity))
return f
cloverleaf.title = 'Cloverleaf'
def cosine_peak(x, y):
circle = np.hypot(80*x-40.0, 90*y-45.)
f = np.exp(-0.04*circle) * np.cos(0.15*circle)
return f
cosine_peak.title = 'Cosine Peak'
allfuncs = [exponential, cliff, saddle, gentle, steep, sphere, trig, gauss, cloverleaf, cosine_peak]
class LinearTester(object):
name = 'Linear'
def __init__(self, xrange=(0.0, 1.0), yrange=(0.0, 1.0), nrange=101, npoints=250):
self.xrange = xrange
self.yrange = yrange
self.nrange = nrange
self.npoints = npoints
rng = np.random.RandomState(1234567890)
self.x = rng.uniform(xrange[0], xrange[1], size=npoints)
self.y = rng.uniform(yrange[0], yrange[1], size=npoints)
self.tri = Triangulation(self.x, self.y)
def replace_data(self, dataset):
self.x = dataset.x
self.y = dataset.y
self.tri = Triangulation(self.x, self.y)
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.linear_extrapolator(z, bbox=self.xrange+self.yrange)
def plot(self, func, interp=True, plotter='imshow'):
if interp:
lpi = self.interpolator(func)
z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
else:
y, x = np.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
z = func(x, y)
z = np.where(np.isinf(z), 0.0, z)
extent = (self.xrange[0], self.xrange[1],
self.yrange[0], self.yrange[1])
fig = plt.figure()
plt.hot() # Some like it hot
if plotter == 'imshow':
plt.imshow(np.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
elif plotter == 'contour':
Y, X = np.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
plt.contour(np.ravel(X), np.ravel(Y), z, 20)
x = self.x
y = self.y
lc = mpl.collections.LineCollection(np.array([((x[i], y[i]), (x[j], y[j]))
for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
ax = plt.gca()
ax.add_collection(lc)
if interp:
title = '%s Interpolant' % self.name
else:
title = 'Reference'
if hasattr(func, 'title'):
plt.title('%s: %s' % (func.title, title))
else:
plt.title(title)
class NNTester(LinearTester):
name = 'Natural Neighbors'
def interpolator(self, func):
z = func(self.x, self.y)
return self.tri.nn_extrapolator(z, bbox=self.xrange+self.yrange)
def make_all_2d_testfuncs(allfuncs=allfuncs):
def make_test(func):
filenames = [
'%s-%s' % (func.__name__, x) for x in
['ref-img', 'nn-img', 'lin-img', 'ref-con', 'nn-con', 'lin-con']]
# We only generate PNGs to save disk space -- we just assume
# that any backend differences are caught by other tests.
@image_comparison(filenames, extensions=['png'],
freetype_version=('2.4.5', '2.4.9'),
remove_text=True)
def reference_test():
nnt.plot(func, interp=False, plotter='imshow')
nnt.plot(func, interp=True, plotter='imshow')
lpt.plot(func, interp=True, plotter='imshow')
nnt.plot(func, interp=False, plotter='contour')
nnt.plot(func, interp=True, plotter='contour')
lpt.plot(func, interp=True, plotter='contour')
tester = reference_test
tester.__name__ = str('test_%s' % func.__name__)
return tester
nnt = NNTester(npoints=1000)
lpt = LinearTester(npoints=1000)
for func in allfuncs:
globals()['test_%s' % func.__name__] = make_test(func)
make_all_2d_testfuncs()
# 1d and 0d grid tests
ref_interpolator = Triangulation([0,10,10,0],
[0,0,10,10]).linear_interpolator([1,10,5,2.0])
def test_1d_grid():
res = ref_interpolator[3:6:2j,1:1:1j]
assert np.allclose(res, [[1.6],[1.9]], rtol=0)
def test_0d_grid():
res = ref_interpolator[3:3:1j,1:1:1j]
assert np.allclose(res, [[1.6]], rtol=0)
@image_comparison(baseline_images=['delaunay-1d-interp'], extensions=['png'])
def test_1d_plots():
x_range = slice(0.25,9.75,20j)
x = np.mgrid[x_range]
ax = plt.gca()
for y in xrange(2,10,2):
plt.plot(x, ref_interpolator[x_range,y:y:1j])
ax.set_xticks([])
ax.set_yticks([])
| mit |
jseabold/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
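    # no cv argument is given, so this relies on the library default
    # (3-fold stratified cross-validation for classifiers in this version)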
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
nyu-dl/dl4mt-simul-trans | insepection.py | 1 | 9509 | # -*- coding: utf-8 -*-
import matplotlib
# matplotlib.use('agg')
import copy
import numpy
import os
import seaborn as sns
import pandas as pd
sns.set(context="paper", font="monospace", style='whitegrid')
from matplotlib import pyplot as plot
from matplotlib import rc
rc('font',**{'family':'Verdana', 'weight': 'normal'})
rc('font', size=8)
rc('text', usetex=True)
rc('text.latex',unicode=True)
rc('text.latex',preamble='\usepackage[utf8]{inputenc}')
rc('text.latex',preamble='\usepackage[russian]{babel}')
rc('text.latex',preamble='\usepackage[german]{babel}')
rc('text.latex',preamble='\usepackage[ngerman]{babel}')
matplotlib.rcParams['ytick.labelsize'] = 11
matplotlib.rcParams['xtick.labelsize'] = 11
def heatmap(sources, refs, trans, actions, idx, atten=None, savefig=True, name='test', info=None, show=False):
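    """Plot the action path of sentence `idx` on a source (rows) x target
    (columns) grid: each step is annotated 'W' when the model waits and reads
    one more source word (action 0) and 'C' when it commits a target word
    (action 1), with the soft attention weights overlaid when `atten` is
    given; figures are saved under .images/C_<name>/."""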
source = [s.strip() for s in sources[idx].decode('utf8').replace('@@', '--').split()] + ['||']
target = ['*'] + [s.strip() for s in trans[idx].decode('utf8').replace('@@', '--').split()] + ['||']
action = actions[idx]
if atten:
attention = numpy.array(atten[idx])
def track(acts, data, annote):
x, y = 0, 0
for a in acts:
x += a
y += 1 - a
# print a, x, y, target[x].encode('utf8')
data[y, x] = 1
annote[y, x] = 'W' if a == 0 else 'C'
return data, annote
# print target
data = numpy.zeros((len(source), len(target)))
annote = numpy.chararray(data.shape, itemsize=8)
annote[:] = ''
data, annote = track(action, data, annote)
data[0, 0] = 1
annote[0, 0] = 'S'
if atten:
data[:-1, 1:] += attention.T
d = pd.DataFrame(data=data, columns=target, index=source)
# p = sns.diverging_palette(220, 10, as_cmap=True)
f, ax = plot.subplots(figsize=(11, 11))
f.set_canvas(plot.gcf().canvas)
g = sns.heatmap(d, ax=ax, annot=annote, fmt='s')
g.xaxis.tick_top()
plot.xticks(rotation=90)
plot.yticks(rotation=0)
# plot.show()
if savefig:
if not os.path.exists('.images/C_{}'.format(name)):
os.mkdir('.images/C_{}'.format(name))
filename = 'Idx={}||'.format(info['index'])
for w in info:
if w is not 'index':
filename += '.{}={:.2f}'.format(w, float(info[w]))
print 'saving...'
f.savefig('.images/C_{}'.format(name) + '/{}'.format(filename) + '.pdf', dpi=100)
if show:
plot.show()
print 'plotting done.'
plot.close()
def heatmap2(sources, refs, trans, actions, idx, atten=None, full_atten=None, savefig=True, name='test', info=None, show=False):
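    """Variant of `heatmap` for the three-action case (actions 0, 1 and 2):
    draws two heatmaps side by side over the source x target grid, one
    overlaid with `atten` and one with `full_atten`, and saves the figure
    under .images/M_<name>/."""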
source = ['*'] + [s.strip() for s in sources[idx].decode('utf8').replace('@@', '--').split()] + ['||']
target = ['*'] + [s.strip() for s in trans[idx].decode('utf8').replace('@@', '--').split()] + ['||'] + ['*']
action = actions[idx]
flag = 0
if atten:
attention = numpy.array(atten[idx])
else:
attention = None
if full_atten:
fullatten = numpy.array(full_atten[idx])
else:
fullatten = None
def track(acts, data, annote):
x, y, z = 0, 0, 0
for a in acts:
x += (a == 1)
y += (a == 0)
z += (a == 2)
# data[y + 1, x] = 1
# data[z, x + 1] = 1
# annote[y, x] = 'W' if a == 0 else 'C'
return data, annote
# print target
data = numpy.zeros((len(source), len(target)))
annote = numpy.chararray(data.shape, itemsize=8)
annote[:] = ''
data, annote = track(action, data, annote)
data[1, 0] = 1
    def draw(data_t, ax, attention=None):
        data = copy.copy(data_t)
        if attention is not None:
            data[1:-1, 1:-1] += attention.T
d = pd.DataFrame(data=data, columns=target, index=source)
# p = sns.diverging_palette(220, 10, as_cmap=True)
g = sns.heatmap(d, mask=(data==0), square=True, cbar=False, linewidths=0.1, ax=ax, annot=annote, fmt='s')
g.xaxis.tick_top()
for tick in ax.get_xticklabels():
tick.set_rotation(90)
for tick in ax.get_yticklabels():
tick.set_rotation(0)
ax.grid(True)
f, [ax1, ax2] = plot.subplots(1, 2, figsize=(22, 11))
f.set_canvas(plot.gcf().canvas)
draw(data, ax1, attention)
# plot.xticks(rotation=90)
# plot.yticks(rotation=0)
# plot.grid()
draw(data, ax2, fullatten)
# plot.xticks(rotation=90)
# plot.yticks(rotation=0)
# plot.grid()
if savefig:
if not os.path.exists('.images/M_{}'.format(name)):
os.mkdir('.images/M_{}'.format(name))
filename = 'Idx={}||'.format(info['index'])
for w in info:
            if w != 'index':
filename += '.{}={:.2f}'.format(w, float(info[w]))
# print 'saving...'
plot.savefig('.images/M_{}'.format(name) + '/{}'.format(filename) + '.pdf', dpi=100)
if show:
plot.show()
# print 'plotting done.'
plot.close()
def visualize(sources, refs, trans, aligns, idx, savefig=True, name='test', info=None):
colors = ['b', 'g']
fig = plot.figure(figsize=(20, 2))
ax = plot.gca()
# plot.hold('on')
plot.xlim([0., 10.])
scolors = []
caidx = 0
coloridx = 0
for sidx in xrange(len([s_.replace('@@', '--').strip() for s_ in sources[idx].split()] + ['<eos>'])):
if caidx >= len(numpy.unique(aligns[idx])) or sidx >= numpy.unique(aligns[idx])[caidx]:
caidx = caidx + 1
coloridx = 1 - coloridx
scolors.append(colors[coloridx])
tcolors = []
lastidx = -1
coloridx = 1
for tt in aligns[idx]:
if tt != lastidx:
lastidx = tt
coloridx = 1 - coloridx
tcolors.append(colors[coloridx])
x, y = 0., 1.
s_pos = [(x, y)]
for ii, ss in enumerate([s_.replace('@@', '--').strip() for s_ in sources[idx].split()] + ['<eos>']):
ss.replace('%', '\%')
xx = plot.text(x, y, ss)
xx.set_bbox(dict(color=scolors[ii], alpha=0.1, edgecolor=scolors[ii]))
xx._renderer = fig.canvas.get_renderer()
wext = xx.get_window_extent()
bbox = ax.transData.inverted().transform(wext)
x = bbox[1, 0] + 0.
s_pos.append((x, y))
s_pos.append((bbox[1, 0], y))
x, y = 0., .95
t_pos = []
for ii, ss in enumerate([s_.decode('utf8').replace('@@', '--') for s_ in trans[idx].split()]):
ss.replace('%', '\%')
xx = plot.text(x, y, ss)
xx._renderer = fig.canvas.get_renderer()
wext = xx.get_window_extent()
bbox = ax.transData.inverted().transform(wext)
t_pos.append((bbox[0, 0], bbox[0, 1] + 0.03))
x = bbox[1, 0] + 0.
t_pos.append((bbox[1, 0], bbox[0, 1] + 0.03))
lasttidx = 0
lastidx = -1
for tidx, sidx in enumerate(aligns[idx]):
if lastidx != sidx:
lastidx = sidx
lasttidx = tidx
sidx = numpy.minimum(sidx, len(s_pos) - 1)
plot.arrow(s_pos[sidx][0], s_pos[sidx][1],
t_pos[tidx][0] - s_pos[sidx][0],
t_pos[tidx][1] - s_pos[sidx][1],
head_width=0., head_length=0.,
fc=tcolors[tidx], ec=tcolors[tidx],
linestyle='dotted', width=0.0001)
for tt in xrange(tidx, len(aligns[idx])):
if aligns[idx][tt] != sidx:
plot.arrow(s_pos[sidx][0], s_pos[sidx][1],
t_pos[tt][0] - s_pos[sidx][0],
t_pos[tt][1] - s_pos[sidx][1],
head_width=0., head_length=0.,
fc=tcolors[tidx], ec=tcolors[tidx],
linestyle='dotted', width=0.0001)
plot.fill_between([t_pos[tidx][0], s_pos[sidx][0], t_pos[tt][0]],
[t_pos[tidx][1], s_pos[sidx][1], t_pos[tt][1]],
facecolor=tcolors[tidx], alpha=0.1)
break
plot.arrow(s_pos[sidx][0], s_pos[sidx][1],
t_pos[-1][0] - s_pos[sidx][0],
t_pos[-1][1] - s_pos[sidx][1],
head_width=0., head_length=0.,
fc=tcolors[-1], ec=tcolors[-1],
linestyle='dotted', width=0.0001)
plot.fill_between([t_pos[lasttidx][0], s_pos[sidx][0], t_pos[-1][0]],
[t_pos[lasttidx][1], s_pos[sidx][1], t_pos[-1][1]],
facecolor=tcolors[tidx], alpha=0.1)
# plot.hold('off')
plot.axis('off')
plot.ylim([0.95, 1.01])
plot.tight_layout()
if savefig:
if not os.path.exists('.images/{}'.format(name)):
os.mkdir('.images/{}'.format(name))
filename = 'Idx={}||'.format(info['index'])
for w in info:
            if w != 'index':
filename += '.{}={:.2f}'.format(w, float(info[w]))
plot.savefig('.images/{}'.format(name) + '/{}'.format(filename) + '.pdf', dpi=300)
print 'plotting done.'
plot.close()
# plot.show()
if __name__ == "__main__":
sources = ['I cannot understand .']
targets = ['Ich verstehe nicht .']
actions = [[0, 0, 1, 1, 2, 0, 1, 2, 2, 0, 1]]
heatmap2(sources, targets, targets, actions, 0, savefig=False, show=True)
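# Note on the toy example above (an interpretation, not stated in the original
# code): the integers in `actions` appear to encode simultaneous-translation
# decisions -- 0 advances along the source (a read/"wait" step), 1 advances
# along the target (a write/"commit" step), and 2 is a third action that only
# heatmap2's track() counts.  heatmap() assumes binary 0/1 actions, which is
# why this example calls heatmap2.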
| bsd-3-clause |
kjs73/pele | playground/plate_folding/geometric_folding.py | 5 | 10127 | from itertools import izip
import numpy as np
from pele.angleaxis import RBTopology, RBSystem, RigidFragment, RBPotentialWrapper
from pele.potentials import BasePotential
from pele.utils import rotations
from plate_potential import PlatePotential
EDGE1_TYPE = "O"
EDGE2_TYPE = "C"
EDGE3_TYPE = "N"
OTHER_TYPE = "H"
def draw_pymol(coords):
import pele.utils.pymolwrapper as pym
pym.start()
pym.draw_spheres(coords, "A", 1)
#class HarmonicPotential(BasePotential):
# def __init__(self, atoms1, atoms2):
# self.atoms1 = np.array(atoms1)
# self.atoms2 = np.array(atoms2)
#
# def getEnergy(self, x):
# e, g = self.getEnergyGradient(x)
# return e
#
# def getEnergyGradient(self, x):
# x = x.reshape([-1,3])
# grad = np.zeros(x.shape)
# etot = 0.
#
# for a1, a2 in izip(self.atoms1, self.atoms2):
# dx = x[a1,:] - x[a2,:]
# r2 = np.sum(dx**2)
# etot += 0.5 * r2
# grad[a1,:] += dx
# grad[a2,:] -= dx
# return etot, grad.ravel()
class MolAtomIndexParser(object):
"""this tool helps with getting the correct indices for the atoms on a given edge of a plate"""
def __init__(self, aatopology, nrigid):
self.aatopology = aatopology
atomtypes = self.aatopology.get_atomtypes()
self.atom_types = np.array(atomtypes).reshape([nrigid, -1])
self.atoms_per_mol = self.atom_types.shape[1]
def get_atom_indices(self, molecule_number, atom_type):
mol_atom_types = self.atom_types[molecule_number, :]
atoms = np.where(mol_atom_types == atom_type)[0]
atoms += molecule_number * self.atoms_per_mol
atoms.sort()
return list(atoms)
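# Hedged usage sketch (not part of the original module).  Given a built
# angle-axis topology, the parser maps (molecule index, edge type) pairs to
# flat atom indices; `system` here stands for any object exposing
# `aatopology` and `nrigid` the way PlateFolder below does.
#
#   parser = MolAtomIndexParser(system.aatopology, system.nrigid)
#   edge1_atoms = parser.get_atom_indices(0, EDGE1_TYPE)  # edge-1 atoms of plate 0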
#class CombinePotential(BasePotential):
# def __init__(self, potentials):
# self.potentials = potentials
#
# def getEnergy(self, coords):
# e = 0
# for pot in self.potentials:
# e += pot.getEnergy(coords)
# return e
#
# def getEnergyGradient(self, coords):
# etot = 0
# grad = np.zeros(coords.size)
# for pot in self.potentials:
# e, g = pot.getEnergyGradient(coords)
# etot += e
# grad += g
# return etot, grad.ravel()
def make_triangular_plate(atoms_per_side=8):
"""construct a single triangular plate
"""
theta = 60. * np.pi / 180.
v1 = np.array([1,0,0])
v2 = np.array([0.5, np.sin(theta), 0])
aps = atoms_per_side
plate = RigidFragment()
for i in xrange(aps-1):
for j in xrange(aps-1):
if i + j >= aps-1:
break
xnew = v1*i + v2*j
if (i == 0 and j == 0 or
i == 0 and j == aps-2 or
i == aps-2 and j == 0):
atomtype = OTHER_TYPE
elif i == 0:
atomtype = EDGE1_TYPE
elif j == 0:
atomtype = EDGE2_TYPE
elif i + j == aps-2:
atomtype = EDGE3_TYPE
else:
atomtype = OTHER_TYPE
plate.add_atom(atomtype, xnew, 1)
# draw(coords)
plate.finalize_setup()
return plate
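# Quick sanity-check sketch (illustrative, relies only on the code above):
# with the default 8 atoms per side the loops place 1 + 2 + ... + 7 = 28 sites;
# the three corners are typed OTHER_TYPE and each open edge gets its own
# EDGE*_TYPE, which is what MolAtomIndexParser later keys on.
#
#   plate = make_triangular_plate()    # a RigidFragment with 28 sites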
class PlateFolder(RBSystem):
"""
This will build a system class for a cluster of interacting plates
"""
def __init__(self, nmol):
self.nrigid = nmol
super(PlateFolder, self).__init__()
self.setup_params(self.params)
self._create_potential()
def get_random_configuration(self):
# js850> this is a bit sketchy because nrigid might not be defined here.
# probably we can get the number of molecules some other way.
coords = 10.*np.random.random(6*self.nrigid)
ca = self.aasystem.coords_adapter(coords)
for p in ca.rotRigid:
p[:] = rotations.random_aa()
return coords
def setup_aatopology(self):
"""this sets up the topology for the whole rigid body system"""
topology = RBTopology()
topology.add_sites([make_triangular_plate() for i in xrange(self.nrigid)])
self.render_scale = 0.2
self.atom_types = topology.get_atomtypes()
self.draw_bonds = []
# for i in xrange(self.nrigid):
# self.draw_bonds.append((3*i, 3*i+1))
# self.draw_bonds.append((3*i, 3*i+2))
topology.finalize_setup()
return topology
def setup_params(self, params):
"""set some system dependent parameters to imrprove algorithm performance"""
params.double_ended_connect.local_connect_params.tsSearchParams.iprint = 10
nebparams = params.double_ended_connect.local_connect_params.NEBparams
nebparams.max_images = 50
nebparams.image_density = 5
nebparams.iter_density = 10.
nebparams.k = 5.
nebparams.reinterpolate = 50
nebparams.NEBquenchParams["iprint"] = 10
tssearch = params.double_ended_connect.local_connect_params.tsSearchParams
tssearch.nsteps_tangent1 = 10
tssearch.nsteps_tangent2 = 30
tssearch.lowestEigenvectorQuenchParams["nsteps"] = 50
tssearch.iprint=1
tssearch.nfail_max = 100
params.takestep.translate = 5.
def _create_potential(self):
"""construct the potential which will compute the energy and gradient in atomistic (cartesian) coordinates
The bonded (hinged) sides interact with an attractive harmonic potential. Each atom
in the bond has a single interaction partner.
        The loosely attractive sides interact with an LJ potential. These interactions are
not specific. Each atom interacts with every other one.
All atoms repel each other with a WCA potential.
"""
parser = MolAtomIndexParser(self.aatopology, self.nrigid)
# this is currently only set up for a tetrahedron
assert self.nrigid == 4
# do hinges
harmonic_atoms1 = []
harmonic_atoms2 = []
harmonic_atoms1 += parser.get_atom_indices(0, EDGE1_TYPE)
harmonic_atoms2 += parser.get_atom_indices(1, EDGE1_TYPE)
harmonic_atoms1 += parser.get_atom_indices(0, EDGE2_TYPE)
harmonic_atoms2 += parser.get_atom_indices(2, EDGE1_TYPE)
harmonic_atoms1 += parser.get_atom_indices(0, EDGE3_TYPE)
harmonic_atoms2 += parser.get_atom_indices(3, EDGE1_TYPE)
self.harmonic_atoms = np.array(harmonic_atoms1 + harmonic_atoms2, np.integer)
harmonic_atoms1 = np.array(harmonic_atoms1, dtype=np.integer).ravel()
harmonic_atoms2 = np.array(harmonic_atoms2, dtype=np.integer).ravel()
for i, j in izip(harmonic_atoms1, harmonic_atoms2):
self.draw_bonds.append((i,j))
# do attractive part
lj_atoms = []
lj_atoms += parser.get_atom_indices(1, EDGE2_TYPE)
lj_atoms += parser.get_atom_indices(1, EDGE3_TYPE)
lj_atoms += parser.get_atom_indices(2, EDGE2_TYPE)
lj_atoms += parser.get_atom_indices(2, EDGE3_TYPE)
lj_atoms += parser.get_atom_indices(3, EDGE2_TYPE)
lj_atoms += parser.get_atom_indices(3, EDGE3_TYPE)
lj_atoms = np.array(sorted(lj_atoms)).ravel()
self.lj_atoms = lj_atoms
self.extra_atoms = []
for i in xrange(self.nrigid):
self.extra_atoms += parser.get_atom_indices(i, OTHER_TYPE)
plate_pot = PlatePotential(harmonic_atoms1, harmonic_atoms2, lj_atoms, k=10)
# wrap it so it can be used with angle axis coordinates
pot = RBPotentialWrapper(self.aatopology.cpp_topology, plate_pot)
# self.aasystem.set_cpp_topology(self.pot.topology)
# raise Exception
return pot
def get_potential(self):
"""construct the rigid body potential"""
try:
return self.pot
except AttributeError:
return self._create_potential()
def get_mindist(self, **kwargs):
from pele.angleaxis import MinPermDistAACluster
from pele.angleaxis import TransformAngleAxisCluster, MeasureAngleAxisCluster
transform = TransformAngleAxisCluster(self.aatopology)
measure = MeasureAngleAxisCluster(self.aatopology, transform=transform,
permlist=[])
return MinPermDistAACluster(self.aasystem, measure=measure, transform=transform,
accuracy=0.1, **kwargs)
def draw(self, rbcoords, index, shift_com=True): # pragma: no cover
from pele.systems._opengl_tools import draw_atoms, draw_cylinder
from matplotlib.colors import cnames, hex2color
coords = self.aasystem.to_atomistic(rbcoords).copy()
coords = coords.reshape([-1,3])
if shift_com:
com=np.mean(coords, axis=0)
coords -= com[np.newaxis, :]
else:
com = np.zeros(3)
radius = 0.42
red = hex2color(cnames["red"])
c2 = hex2color(cnames["grey"])
draw_atoms(coords, self.harmonic_atoms, c2, radius=radius)
draw_atoms(coords, self.lj_atoms, red, radius=radius)
draw_atoms(coords, self.extra_atoms, c2, radius=radius)
for i1, i2 in self.draw_bonds:
draw_cylinder(coords[i1,:], coords[i2,:], color=c2)
def test_bh():
np.random.seed(0)
nmol = 4
system = PlateFolder(nmol)
db = system.create_database()
bh = system.get_basinhopping(db)
bh.run(100)
m1 = db.minima()[0]
print m1.coords
for x in m1.coords:
print "%.12f," % x,
print ""
print m1.energy
def test_gui():
from pele.gui import run_gui
nmol = 4
system = PlateFolder(nmol)
db = system.create_database("tetrahedra.sqlite")
run_gui(system, db)
if __name__ == "__main__":
test_gui()
# test_bh()
| gpl-3.0 |
bowenliu16/deepchem | deepchem/models/sklearn_models/__init__.py | 3 | 2360 | """
Code for processing datasets using scikit-learn.
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
from sklearn.linear_model import LassoCV
from sklearn.linear_model import ElasticNetCV
from sklearn.linear_model import LassoLarsCV
from deepchem.models import Model
from deepchem.utils.save import load_from_disk
from deepchem.utils.save import save_to_disk
class SklearnModel(Model):
"""
  Wrapper class that adapts scikit-learn estimators to the deepchem Model interface.
"""
def fit(self, dataset, **kwargs):
"""
Fits SKLearn model to data.
"""
X = dataset.X
y = np.squeeze(dataset.y)
w = np.squeeze(dataset.w)
# Logistic regression doesn't support weights
if not isinstance(self.model_instance, LogisticRegression):
self.model_instance.fit(X, y, w)
else:
self.model_instance.fit(X, y)
y_pred_raw = self.model_instance.predict(X)
def predict_on_batch(self, X, pad_batch=False):
"""
Makes predictions on batch of data.
Parameters
----------
X: np.ndarray
Features
pad_batch: bool, optional
Ignored for Sklearn Model. Only used for Tensorflow models
with rigid batch-size requirements.
"""
return self.model_instance.predict(X)
def predict_proba_on_batch(self, X, pad_batch=False):
"""
Makes per-class predictions on batch of data.
Parameters
----------
X: np.ndarray
Features
pad_batch: bool, optional
Ignored for Sklearn Model. Only used for Tensorflow models
with rigid batch-size requirements.
"""
return self.model_instance.predict_proba(X)
def predict(self, X, transformers=[]):
"""
Makes predictions on dataset.
"""
return super(SklearnModel, self).predict(X, transformers)
def save(self):
"""Saves sklearn model to disk using joblib."""
save_to_disk(self.model_instance, self.get_model_filename(self.model_dir))
def reload(self):
"""Loads sklearn model from joblib file on disk."""
self.model_instance = load_from_disk(Model.get_model_filename(self.model_dir))
def get_num_tasks(self):
"""Number of tasks for this model. Defaults to 1"""
return 1
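# Hedged usage sketch.  The constructor comes from the deepchem base `Model`
# class, which is not shown in this file, so the exact arguments below are an
# assumption; only fit(), predict_on_batch(), save() and reload() are defined
# above.
#
#   sklearn_model = RandomForestRegressor(n_estimators=100)
#   model = SklearnModel(model_instance=sklearn_model, model_dir="/tmp/rf")
#   model.fit(train_dataset)              # train_dataset: a deepchem Dataset
#   y_pred = model.predict_on_batch(train_dataset.X)
#   model.save()                          # joblib dump into model_dir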
| gpl-3.0 |
vortex-ape/scikit-learn | examples/decomposition/plot_pca_iris.py | 32 | 1516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
ax.text3D(X[y == label, 0].mean(),
X[y == label, 1].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral,
edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
plt.show()
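# A small, optional addition to the original example: after fitting, the PCA
# object reports how much variance each of the three components captures.
print("Explained variance ratios:", pca.explained_variance_ratio_)
print("Total variance captured: %.3f" % pca.explained_variance_ratio_.sum())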
| bsd-3-clause |
eteq/bokeh | bokeh/charts/builder/histogram_builder.py | 43 | 9142 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class, which lets you build your histograms by just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
        # list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
#build the histogram using the set bins number
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
        ColumnDataSource.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, his, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, his, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
| bsd-3-clause |
aminert/scikit-learn | examples/hetero_feature_union.py | 288 | 6236 | """
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to the sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
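    # Illustrative output (values computed by hand, not from the dataset):
    #   TextStats().transform(["Hi. Bye."]) -> [{'length': 8, 'num_sentences': 2}]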
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(y, test.target))
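# Optional extension (a sketch, not part of the original example): since the
# FeatureUnion is registered in the Pipeline under the name 'union', its
# transformer_weights can themselves be searched with GridSearchCV via the
# usual '<step>__<param>' naming.
#
#   from sklearn.grid_search import GridSearchCV
#   param_grid = {'union__transformer_weights': [
#       {'subject': 0.8, 'body_bow': 0.5, 'body_stats': 1.0},
#       {'subject': 1.0, 'body_bow': 1.0, 'body_stats': 0.5},
#   ]}
#   grid = GridSearchCV(pipeline, param_grid=param_grid)
#   grid.fit(train.data, train.target)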
| bsd-3-clause |
Wyliodrin/wyliodrin-app-server | source/libraries/notebook/loader.py | 1 | 3374 | import sys
import json
import os
import traceback
import types
import redis
wyliodrin_redis = redis.StrictRedis ()
#import matplotlib
#matplotlib.use ('Agg')
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonTracebackLexer
wyliodrin_formatter = HtmlFormatter (noclasses = True)
wyliodrin_types = []
def wyliodrin_print_result (strtype, strvalue):
if not isinstance (strtype, str):
strtype = repr (strtype)
#os.write (3, '\n================================\n')
# os.write (3, strtype+'\n')
# try:
# os.write (3, strvalue)
# except UnicodeEncodeError:
# bytes = strvalue.encode('UTF-8', 'backslashreplace')
# os.write(3, bytes)
# os.write (3, '\n'+sys.argv[1]+'\n')
wyliodrin_redis.publish (sys.argv[1]+'response', strtype+'\n'+strvalue)
def wyliodrin_display (value):
strtype = type (value)
strvalue = None
if value is None:
return
__builtins__._ = None
#print isinstance (value, (types.TypeType, types.ClassType, types.ObjectType))
if isinstance (value, (types.TypeType, types.ClassType, types.ObjectType)):
#print 'object'
# HTML
if not strvalue:
try:
if callable (value._repr_html_):
strvalue = value._repr_html_ ()
strtype = '<format \'html\'>'
except Exception, e:
pass
# SVG
if not strvalue:
try:
if callable (value._repr_svg_):
strvalue = value._repr_svg_ ()
strtype = '<format \'html\'>'
except Exception, e:
pass
# PNG
if not strvalue:
try:
if callable (value._repr_png_):
strvalue = value._repr_png_ ()
strtype = '<format \'png\'>'
except Exception, e:
pass
# JPG
if not strvalue:
try:
if callable (value._repr_jpg_):
strvalue = value._repr_jpg_ ()
strtype = '<format \'jpg\'>'
except Exception, e:
pass
# latex
if not strvalue:
try:
if callable (value._repr_latex_):
strvalue = value._repr_latex_ ()
strtype = '<format \'latex\'>'
except Exception, e:
pass
# types
pos = 0;
# print strvalue
while strvalue == None and pos < len (wyliodrin_types):
try:
(t, s) = wyliodrin_types[pos](value)
if t and s:
strtype = t
strvalue =s
elif t:
return
except Exception, e:
pass
pos = pos + 1
# other
if not strvalue:
strvalue = repr (value)
else:
strvalue = repr (value)
if strvalue:
wyliodrin_print_result (strtype, strvalue)
def wyliodrin_exception (type, value, tb):
exception = ''.join (traceback.format_exception(type, value, tb))
exception_html = highlight (exception, PythonTracebackLexer(), wyliodrin_formatter)
#os.write (4, exception_html)
#os.write (4, '\n'+sys.argv[1]+'\n')
wyliodrin_redis.publish (sys.argv[1]+'exception', exception_html)
sys.displayhook = wyliodrin_display
sys.excepthook = wyliodrin_exception
sys.ps1 = '>>>\n';
sys.ps2 = '...\n';
os.chdir (os.getenv ('HOME')+'/notebook')
os.umask (002)
def wyliodrin_plot_type (value):
try:
import StringIO
# print callable (value.get_figure)
if hasattr (value, 'get_figure') and callable (value.get_figure):
# print 'plot type'
fig = value.get_figure ()
imgdata = StringIO.StringIO ()
fig.savefig (imgdata, format='svg')
return ('<format \'svg\'>', imgdata.getvalue ())
except Exception, e:
print e
pass
return (None, None)
wyliodrin_types.append (wyliodrin_plot_type)
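# Hedged sketch of the extension point used above: any callable appended to
# wyliodrin_types receives the value being displayed and returns a
# (type_string, serialized_value) pair, or (None, None) to let the next
# handler try.  The pandas handler below is an illustration, not part of the
# original loader.
#
# def wyliodrin_dataframe_type (value):
#     try:
#         import pandas
#         if isinstance (value, pandas.DataFrame):
#             return ('<format \'html\'>', value.to_html ())
#     except Exception, e:
#         pass
#     return (None, None)
# wyliodrin_types.append (wyliodrin_dataframe_type)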
| gpl-3.0 |
beepee14/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
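# A small, optional addition: after the loop, `clf` holds the estimator
# refitted on the whole development set for the last scoring metric, so it can
# be reused directly.
print("Best CV score for the last metric: %0.3f" % clf.best_score_)
print("Refit estimator:", clf.best_estimator_)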
| bsd-3-clause |
CuchulainX/featherweight.light | fit_camera.py | 3 | 4000 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
#matplotlib.use('WXAgg') ## Seems to be the only backend to work without complaints.
from pylab import *
from color_block import gucci_dict
from fit_mapping import IntrinsicParameters, ExtrinsicParameters, PinholeCamera, quaternion_to_matrix
###############################################################################
##
##
if __name__ == '__main__':
ion() ## Turn on real-time plotting
## Image plotting colors
register_cmap(name='guc', data=gucci_dict)
rc('image', cmap='guc')
# rc('image', cmap='RdBu')
get_points = True
#get_points = False
## Check number of parameters
if len(sys.argv)<2:
raise Exception('''Incorrect number of parameters.
Usage: %s <data_path>'''%(sys.argv[0]))
paul_data = True
## Get the name of directory that contains the data. It should contain two
## files named 'params.txt' and 'disparity.txt'.
data_path = '%s/'%(sys.argv[1])
if paul_data:
## Load the image with the disparity values. E.g., the range data produced by Kinect.
disparity = loadtxt(data_path+'kinect.mat')
#disparity = rot90(loadtxt(data_path+'kinect.mat'),2)
k_f = 640
k_oc = .5*(1+array([disparity.shape[1], disparity.shape[0]]))
kin_int = IntrinsicParameters(k_f, k_oc)
cam_shot = rot90(imread(data_path+'img.png'),3)
c_f = 86/.009 # (Lens focal length divided by pixel size, in mm)
c_oc = array([cam_shot.shape[1]/2., cam_shot.shape[0]/2.])
cam_int = IntrinsicParameters(c_f, c_oc)
if get_points:
figure(1)
print """
Please select four points in the first image. Click on a 5th point (that will be
discarded) to finish. Remember the order you selected them. If you do not click
on the right spots, the result will be bad, and it will not be a fault of the
algorithm. It can only be attributable to human error.
"""
imshow(disparity, vmin=420, vmax=560)
k_pts = array(ginput(n=5, show_clicks=True)[:-1])
print """
Now select the corresponding points. _In the same order_.
"""
imshow(cam_shot)
c_pts = array(ginput(n=5, show_clicks=True)[:-1])
else:
k_pts = array([[ 194.33870968, 93.69354839],
[ 190.46774194, 374.98387097],
[ 360.79032258, 373.69354839],
[ 372.40322581, 119.5 ]])
c_pts=array([[ 390.5 , 125.5],
[ 390.5 , 3181. ],
[ 2543. , 3170.5],
[ 2574.5 , 125.5]])
print k_pts
print c_pts
pp = array(k_pts, dtype=int)
pp_dis = disparity[pp[:,1], pp[:,0]]
xyz = kin_int.coordinates_from_xy_disparity(pp, pp_dis)
print xyz
T = array([0,0,0])
R = quaternion_to_matrix(array([0,0,0]))
cam_ext = ExtrinsicParameters(T, R)
c_camera = PinholeCamera(cam_int, cam_ext)
print c_camera.project_into_camera(xyz)
c_camera.find_pose(xyz, c_pts)
reproj = c_camera.project_into_camera(xyz)
print c_camera.ext_param.T
print c_camera.ext_param.Q
savetxt(data_path+'params.txt', [c_camera.ext_param.T, c_camera.ext_param.Q ])
figure(2)
subplot(1,2,1)
imshow(disparity, vmin=420, vmax=560)
plot(k_pts[:,0], k_pts[:,1], 'b+', ms=10, mew=2)
subplot(1,2,2)
imshow(cam_shot)
plot(c_pts[:,0], c_pts[:,1], 'b+', ms=10, mew=2)
plot(reproj[:,0], reproj[:,1], 'rx', ms=10, mew=2)
suptitle('Camera localization from user selected points', fontweight='bold', fontsize=20)
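# Hedged sketch (not part of the original script): the pose written to
# params.txt above can be reloaded and rebuilt into a camera for later
# re-projection.  It assumes ext_param.Q round-trips through
# quaternion_to_matrix(), which this file does not demonstrate.
#
#   T_loaded, Q_loaded = loadtxt(data_path + 'params.txt')
#   cam = PinholeCamera(cam_int,
#                       ExtrinsicParameters(T_loaded, quaternion_to_matrix(Q_loaded)))
#   reproj_again = cam.project_into_camera(xyz)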
| apache-2.0 |
nburn42/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 21 | 5221 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), axis=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Evaluate.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
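  # Hedged follow-up sketch: new documents would go through the same
  # vocabulary processor before prediction, e.g.
  #   x_new = np.array(list(vocab_processor.transform(['some text to classify'])))
  #   pred_input_fn = tf.estimator.inputs.numpy_input_fn(
  #       x={WORDS_FEATURE: x_new}, num_epochs=1, shuffle=False)
  #   predictions = list(classifier.predict(input_fn=pred_input_fn))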
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
antgonza/qiime | tests/test_make_2d_plots.py | 15 | 13517 | #!/usr/bin/env python
# file test_make_2d_plots.py
__author__ = "Jesse Stombaugh"
__copyright__ = "Copyright 2011, The QIIME Project" # consider project name
# remember to add yourself
__credits__ = ["Jesse Stombaugh", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jesse Stombaugh"
__email__ = "[email protected]"
from string import digits
import matplotlib
from matplotlib import use
use('Agg', warn=False)
from numpy import array
from os.path import exists, join
from StringIO import StringIO
from unittest import TestCase, main
from numpy.testing import assert_almost_equal
from os import remove
from qiime.make_2d_plots import (make_interactive_scatter, transform_xy_coords,
draw_scatterplot, draw_pcoa_graph,
extract_and_color_xy_coords, write_html_file,
create_html_filename,
convert_coord_data_to_dict, generate_xmap,
draw_scree_graph, make_line_plot)
from qiime.colors import data_colors
from qiime.util import get_qiime_temp_dir
class TopLevelTests(TestCase):
"""Tests of top-level functions"""
def setUp(self):
"""define some top-level data"""
self.tmp_dir = get_qiime_temp_dir()
self.props = {
"title": "PCoA - PC1 vs PC2",
"ylabel": "PC2",
"xlabel": "PC1"}
self.props_scree = {
"title": "Scree plor",
"ylabel": "Fraction of variance",
"xlabel": "Principal component"}
self.data = {}
self.data['coord'] = [['Sample1', 'Sample2'], array([[-0.2, 0.07],
[-0.04, 0.2]]), array(
[0.7, 0.6]),
array([25.00, 30.00])]
self.data[
'map'] = [['#SampleID', 'Day'], ['Sample1', 'Day1'], ['Sample2',
'Day1']]
self.coord_tups = [("1", "2"), ("3", "2"), ("1", "3")]
self.generate_eps = True
self.data['alpha'] = 0.33
self.groups = {}
self.groups['Day1'] = ['Sample1', 'Sample2']
self.colors = {}
self.colors['Day1'] = 'blue1'
self.prefs = {}
self.prefs['Sample'] = {}
self.prefs['Sample']['column'] = 'Day'
self.data_color_hsv = {
'blue1': (240, 100, 100)
}
self.data_color_order = ['blue1', []]
self.background_color = 'black'
self.label_color = 'white'
self.dir_path = '/tmp/'
self.data_file_link = '/tmp/'
self.xy_coords = {}
self.xy_coords['Sample1'] = ([-0.2], [0.07], ['Sample1: Day1'],
['#0000ff'], ['s'], [None], [None], [None])
self.xy_coords['Sample2'] = ([-0.04], [0.2], ['Sample2: Day1'],
['#0000ff'], ['s'], [None], [None], [None])
self.xy_coords_scree = {}
self.xy_coords_scree['Variance'] = ([1, 2], [0.28, 0.12], 's', 'b')
self.xy_coords_scree['Cum Variance'] = ([1, 2], [0.28, 0.40], 'o', 'r')
self.coord_1 = '1'
self.coord_2 = '2'
self.p2d = {}
self.p2d['Sample1'] = -0.2
self.p2d['Sample2'] = -0.04
self.p1d = {}
self.p1d['Sample1'] = 0.07
self.p1d['Sample2'] = 0.2
self.all_cids = {}
self.all_cids = ['Sample1: Day1', 'Sample2: Day1']
self.all_xcoords = [100.79999999999998, 279.36000000000001]
self.all_ycoords = [54.000000000000014, 288.0]
self.plot_label = 'SampleID'
self.coords = {'pc vector number': ['Sample1', 'Sample2'], '1':
array([-0.2, -0.04]), '2': array([0.07, 0.2])}
self.x_len = 4.5
self.y_len = 4.5
self.size = 20
self.alpha = 0.33
self._paths_to_clean_up = []
def tearDown(self):
map(remove, self._paths_to_clean_up)
def remove_nums(self, text):
"""Removes all digits from the given string.
        Returns the string with all digits removed. Useful for testing strings
for equality in unit tests where you don't care about numeric values,
or if some values are random.
This code was taken from http://bytes.com/topic/python/answers/
850562-finding-all-numbers-string-replacing
Arguments:
text - the string to remove digits from
"""
return text.translate(None, digits)
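    # Illustrative example: remove_nums('coords="100,306,5"') returns
    # 'coords=",,"', which is how the map-area assertions in this class ignore
    # pixel positions that can vary between matplotlib versions.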
def test_make_line_plot(self):
""" make_line_plot: creates HTML source for scree plot"""
filename1 = join(self.tmp_dir, 'scree_plot.png')
filename2 = join(self.tmp_dir, 'scree_plot.eps.gz')
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = make_line_plot(self.tmp_dir, self.tmp_dir,
self.background_color, self.label_color,
self.xy_coords_scree, self.props_scree,
x_len=4.5, y_len=4.5, generate_eps=True)
self.assertEqual(obs1, filename_scree % filename1)
self.assertEqual(obs2, expdownlink_scree % filename2)
self.assertTrue(
exists(filename1),
            'The png file was not created in the appropriate location')
self.assertTrue(
exists(filename2),
            'The eps file was not created in the appropriate location')
def test_make_interactive_scatter(self):
"""make_interactive_scatter: creates HTML source for interactive \
images"""
filename1 = '/tmp/PC1_vs_PC2_plot.png'
filename2 = '/tmp/PC1vsPC2plot.eps.gz'
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2, obs3 = make_interactive_scatter(
self.plot_label, self.dir_path,
self.data_file_link, self.background_color,
self.label_color, None, self.alpha,
self.xy_coords, self.props,
self.x_len, self.y_len, self.size,
draw_axes=False, generate_eps=True)
self.assertEqual(self.remove_nums(obs1), self.remove_nums(expsrcmap1))
self.assertEqual(self.remove_nums(obs2), self.remove_nums(expimgmap1))
self.assertEqual(self.remove_nums(obs3), self.remove_nums(expeps1))
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
self.assertTrue(exists(filename2), 'The eps file was not created in \
the appropriate location')
def test_generate_xmap(self):
"""generate_xmap: generates the html area map"""
exp2 = 360
exp3 = 360
obs1, obs2, obs3 = generate_xmap(self.x_len, self.y_len, self.all_cids,
self.all_xcoords, self.all_ycoords)
self.assertEqual(obs1, exparea)
self.assertEqual(obs2, exp2)
self.assertEqual(obs3, exp3)
def test_draw_scatterplot(self):
"""draw_scatterplot: draws the matplotlib scatterplot"""
exp = array([[-0.04, 0.2]])
sc_plot = draw_scatterplot(self.props, self.xy_coords, self.x_len,
self.y_len, self.size,
self.background_color, self.label_color, None,
self.alpha)
obs = sc_plot.get_offsets()
assert_almost_equal(obs, exp)
def test_transform_xy_coords(self):
"""transform_xy_coords: transforms the xy coords from the matplotlib \
plot into html spatial coords which allows for mouseovers"""
sc_plot = draw_scatterplot(self.props, self.xy_coords, self.x_len,
self.y_len, self.size,
self.background_color, self.label_color, None,
self.alpha)
obs1, obs2, obs3 = transform_xy_coords(self.xy_coords, sc_plot)
self.assertEqual(len(obs1), len(self.all_cids))
self.assertEqual(len(obs2), len(self.all_xcoords))
self.assertEqual(len(obs3), len(self.all_ycoords))
def test_draw_scree_graph(self):
"""draw_scree_graph: draws the matplotlib figure"""
filename1 = join(self.tmp_dir, 'scree_plot.png')
filename2 = join(self.tmp_dir, 'scree_plot.eps.gz')
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = draw_scree_graph(self.tmp_dir, self.tmp_dir,
self.background_color, self.label_color,
generate_eps=True, data=self.data)
self.assertEqual(obs1, expimgsrc_scree % filename1)
self.assertEqual(obs2, expdownlink_scree % filename2)
self.assertTrue(
exists(filename1),
'The png file was not created in the appropriate location')
self.assertTrue(
exists(filename2),
'The eps file was not created in the appropriate location')
def test_draw_pcoa_graph(self):
"""draw_pcoa_graph: draws the matplotlib figure"""
filename1 = '/tmp/PC1_vs_PC2_plot.png'
filename2 = '/tmp/PC1vsPC2plot.eps.gz'
self._paths_to_clean_up = [filename1, filename2]
obs1, obs2 = draw_pcoa_graph(self.plot_label, self.dir_path,
self.data_file_link, self.coord_1, self.coord_2,
None, None, None, None,
self.data, self.prefs, self.groups, self.colors,
self.background_color, self.label_color,
data_colors, self.data_color_order,
generate_eps=True)
self.assertEqual(obs1, expsrcmap2 + expimgmap2)
self.assertEqual(obs2, expeps2)
self.assertTrue(exists(filename1), 'The png file was not created in \
the appropriate location')
self.assertTrue(exists(filename2), 'The eps file was not created in \
the appropriate location')
def test_extract_and_color_xy_coords(self):
"""extract_and_color_xy_coords: gets coords from coords file and \
associates colors to those coords based on its group"""
obs = extract_and_color_xy_coords(
self.p1d, self.p2d, None, None, None, self.colors,
data_colors, self.groups, self.coords)
self.assertEqual(obs['Sample1'], self.xy_coords['Sample1'])
self.assertEqual(obs['Sample2'], self.xy_coords['Sample2'])
def test_create_html_filename(self):
"""create_html_filename: using the pcoa filename, generates an html \
filename for the plots"""
exp = 'test_2D.html'
obs = create_html_filename(
coord_filename='test',
name_ending='_2D.html')
self.assertEqual(obs, exp)
def test_convert_coord_data_to_dict(self):
"""convert_coord_data_to_dict: converts the coords list into a \
dictionary"""
exp1 = {
'pc vector number': ['Sample1', 'Sample2'],
'1': array([-0.2, -0.04]),
'2': array([0.07, 0.2])}
exp2 = {'1': [25.00], '2': [30.00], }
obs1, obs2 = convert_coord_data_to_dict(self.data)
self.assertEqual(exp1['pc vector number'], obs1['pc vector number'])
assert_almost_equal(exp1['1'], obs1['1'])
assert_almost_equal(exp1['2'], obs1['2'])
assert_almost_equal(exp2['1'], obs2['1'])
assert_almost_equal(exp2['2'], obs2['2'])
def test_write_html_file(self):
"Write html and make sure it gets cleaned up"""
filename1 = '/tmp/test.html'
self._paths_to_clean_up = [filename1]
write_html_file('Test', '/tmp/test.html')
self.assertTrue(exists(filename1), 'The file was not created in \
the appropriate location')
# expected results for the unit testing
exparea = [
'<AREA shape="circle" coords="100,306,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n',
'<AREA shape="circle" coords="279,72,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n']
expsrcmap1 = '<img src="/tmp/PC1_vs_PC2_plot.png" border="0" ismap usemap="#pointsSampleID12" width="360" height="360" />\n'
expimgmap1 = '\n<MAP name="pointsSampleID12">\n\
<AREA shape="circle" coords="100,306,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n\
<AREA shape="circle" coords="279,72,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n\n\
</MAP>\n'
expeps1 = '<a href="/tmp/PC1vsPC2plot.eps.gz" >Download Figure</a>'
expsrcmap2 = '<img src="/tmp/PC1_vs_PC2_plot.png" border="0" ismap usemap="#pointsSampleID12" width="360" height="360" />\n'
expimgmap2 = '\n<MAP name="pointsSampleID12">\n\
<AREA shape="circle" coords="100,208,5" href="#Sample1: Day1" onmouseover="return overlib(\'Sample1: Day1\');" onmouseout="return nd();">\n\
<AREA shape="circle" coords="279,84,5" href="#Sample2: Day1" onmouseover="return overlib(\'Sample2: Day1\');" onmouseout="return nd();">\n\n\
</MAP>\n'
expeps2 = '<a href="/tmp/PC1vsPC2plot.eps.gz" >Download Figure</a>'
filename_scree = '%s'
expdownlink_scree = '<a href="%s" >Download Figure</a>'
expimgsrc_scree = '<img src="%s" border=0 />'
# run tests if called from command line
if __name__ == "__main__":
main()
| gpl-2.0 |
johankaito/fufuka | graph-tool/src/graph_tool/draw/graphviz_draw.py | 2 | 24109 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2015 Tiago de Paula Peixoto <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, absolute_import, print_function
import sys
import os
import os.path
import time
import warnings
import ctypes
import ctypes.util
import tempfile
from .. import PropertyMap, group_vector_property, ungroup_vector_property
import numpy.random
import copy
from .. draw import arf_layout
try:
import matplotlib.cm
import matplotlib.colors
except ImportError:
msg = "Error importing matplotlib module... graphviz_draw() will not work"
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
raise
try:
libname = ctypes.util.find_library("c")
libc = ctypes.CDLL(libname)
if hasattr(libc, "open_memstream"):
libc.open_memstream.restype = ctypes.POINTER(ctypes.c_char)
except OSError:
msg = "Error importing C standard library... graphviz_draw() will not work."
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
pass
try:
libname = ctypes.util.find_library("gvc")
if libname is None:
raise OSError()
libgv = ctypes.CDLL(libname, ctypes.RTLD_GLOBAL)
# properly set the return types of certain functions
ptype = ctypes.POINTER(ctypes.c_char)
libgv.gvContext.restype = ptype
libgv.agopen.restype = ptype
libgv.agnode.restype = ptype
libgv.agedge.restype = ptype
libgv.agget.restype = ptype
libgv.agstrdup_html.restype = ptype
# create a context to use the whole time (if we keep freeing and recreating
# it, we will hit a memory leak in graphviz)
gvc = libgv.gvContext()
try:
gv_new_api = True
libgv_directed = libgv.Agdirected
libgv_undirected = libgv.Agundirected
except AttributeError:
gv_new_api = False
libgv_directed = 1
libgv_undirected = 0
except OSError:
msg = "Error importing graphviz C library (libgvc)... graphviz_draw() will not work."
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
def htmlize(val):
if len(val) >= 2 and val[0] == "<" and val[-1] == ">":
return ctypes.string_at(libgv.agstrdup_html(val[1:-1]))
return val
def aset(elem, attr, value):
v = htmlize(str(value)).encode("utf8")
libgv.agsafeset(elem, str(attr).encode("utf8"), v, v)
def aget(elem, attr):
s = ctypes.string_at(libgv.agget(elem,
str(attr).encode("utf8")))
return s.decode("utf8")
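# Minimal usage sketch for the helpers above (not part of the original module;
# ``elem`` stands for any graph/node/edge handle obtained from libgv):
#
#     aset(elem, "color", "#a40000")              # stringified and stored via agsafeset
#     assert aget(elem, "color") == "#a40000"     # read back via agget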
def graphviz_draw(g, pos=None, size=(15, 15), pin=False, layout=None,
maxiter=None, ratio="fill", overlap=True, sep=None,
splines=False, vsize=0.105, penwidth=1.0, elen=None,
gprops={}, vprops={}, eprops={}, vcolor="#a40000",
ecolor="#2e3436", vcmap=None, vnorm=True, ecmap=None,
enorm=True, vorder=None, eorder=None, output="",
output_format="auto", fork=False, return_string=False):
r"""Draw a graph using graphviz.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be drawn.
pos : :class:`~graph_tool.PropertyMap` or tuple of :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Vertex property maps containing the x and y coordinates of the vertices.
size : tuple of scalars (optional, default: ``(15,15)``)
Size (in centimeters) of the canvas.
pin : bool or :class:`~graph_tool.PropertyMap` (default: ``False``)
If ``True``, the vertices are not moved from their initial position. If
a :class:`~graph_tool.PropertyMap` is passed, it is used to pin nodes
individually.
layout : string (default: ``"neato" if g.num_vertices() <= 1000 else "sfdp"``)
Layout engine to be used. Possible values are ``"neato"``, ``"fdp"``,
``"dot"``, ``"circo"``, ``"twopi"`` and ``"arf"``.
maxiter : int (default: ``None``)
If specified, limits the maximum number of iterations.
ratio : string or float (default: ``"fill"``)
Sets the aspect ratio (drawing height/drawing width) for the
drawing. Note that this is adjusted before the ``size`` attribute
constraints are enforced.
If ``ratio`` is numeric, it is taken as the desired aspect ratio. Then,
if the actual aspect ratio is less than the desired ratio, the drawing
height is scaled up to achieve the desired ratio; if the actual ratio is
greater than that desired ratio, the drawing width is scaled up.
If ``ratio == "fill"`` and the size attribute is set, node positions are
scaled, separately in both x and y, so that the final drawing exactly
fills the specified size.
If ``ratio == "compress"`` and the size attribute is set, dot attempts
to compress the initial layout to fit in the given size. This achieves a
tighter packing of nodes but reduces the balance and symmetry. This
feature only works in dot.
If ``ratio == "expand"``, the size attribute is set, and both the width
and the height of the graph are less than the value in size, node
positions are scaled uniformly until at least one dimension fits size
exactly. Note that this is distinct from using size as the desired
size, as here the drawing is expanded before edges are generated and all
node and text sizes remain unchanged.
If ``ratio == "auto"``, the page attribute is set and the graph cannot
be drawn on a single page, then size is set to an "ideal" value. In
particular, the size in a given dimension will be the smallest integral
multiple of the page size in that dimension which is at least half the
current size. The two dimensions are then scaled independently to the
new size. This feature only works in dot.
overlap : bool or string (default: ``True``)
Determines if and how node overlaps should be removed. Nodes are first
enlarged using the sep attribute. If ``True``, overlaps are retained. If
the value is ``"scale"``, overlaps are removed by uniformly scaling in x
and y. If the value is ``False``, node overlaps are removed by a
Voronoi-based technique. If the value is ``"scalexy"``, x and y are
separately scaled to remove overlaps.
If sfdp is available, one can set overlap to ``"prism"`` to use a
proximity graph-based algorithm for overlap removal. This is the
preferred technique, though ``"scale"`` and ``False`` can work well with
small graphs. This technique starts with a small scaling up, controlled
by the overlap_scaling attribute, which can remove a significant portion
of the overlap. The prism option also accepts an optional non-negative
integer suffix. This can be used to control the number of attempts made
at overlap removal. By default, ``overlap == "prism"`` is equivalent to
``overlap == "prism1000"``. Setting ``overlap == "prism0"`` causes only
the scaling phase to be run.
If the value is ``"compress"``, the layout will be scaled down as much
as possible without introducing any overlaps, obviously assuming there
are none to begin with.
sep : float (default: ``None``)
Specifies margin to leave around nodes when removing node overlap. This
guarantees a minimal non-zero distance between nodes.
splines : bool (default: ``False``)
If ``True``, the edges are drawn as splines and routed around the
vertices.
vsize : float, :class:`~graph_tool.PropertyMap`, or tuple (default: ``0.105``)
Default vertex size (width and height). If a tuple is specified, the
first value should be a property map, and the second is a scale factor.
penwidth : float, :class:`~graph_tool.PropertyMap` or tuple (default: ``1.0``)
Specifies the width of the pen, in points, used to draw lines and
curves, including the boundaries of edges and clusters. It has no effect
on text. If a tuple is specified, the first value should be a property
map, and the second is a scale factor.
elen : float or :class:`~graph_tool.PropertyMap` (default: ``None``)
Preferred edge length, in inches.
gprops : dict (default: ``{}``)
Additional graph properties, as a dictionary. The keys are the property
names, and the values must be convertible to string.
vprops : dict (default: ``{}``)
Additional vertex properties, as a dictionary. The keys are the property
names, and the values must be convertible to string, or vertex property
maps, with values convertible to strings.
eprops : dict (default: ``{}``)
Additional edge properties, as a dictionary. The keys are the property
names, and the values must be convertible to string, or edge property
maps, with values convertible to strings.
vcolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#a40000"``)
Drawing color for vertices. If the value supplied is a property map,
the values must be scalar types, whose color values are obtained from
the ``vcmap`` argument.
ecolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#2e3436"``)
Drawing color for edges. If the value supplied is a property map,
the values must be scalar types, whose color values are obtained from
the ``ecmap`` argument.
vcmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`)
Vertex color map.
vnorm : bool (default: ``True``)
Normalize vertex color values to the [0,1] range.
ecmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`)
Edge color map.
enorm : bool (default: ``True``)
Normalize edge color values to the [0,1] range.
vorder : :class:`~graph_tool.PropertyMap` (default: ``None``)
Scalar vertex property map which specifies the order with which vertices
are drawn.
eorder : :class:`~graph_tool.PropertyMap` (default: ``None``)
Scalar edge property map which specifies the order with which edges
are drawn.
output : string (default: ``""``)
Output file name.
output_format : string (default: ``"auto"``)
Output file format. Possible values are ``"auto"``, ``"xlib"``,
``"ps"``, ``"svg"``, ``"svgz"``, ``"fig"``, ``"mif"``, ``"hpgl"``,
``"pcl"``, ``"png"``, ``"gif"``, ``"dia"``, ``"imap"``, ``"cmapx"``. If
the value is ``"auto"``, the format is guessed from the ``output``
parameter, or ``xlib`` if it is empty. If the value is ``None``, no
output is produced.
fork : bool (default: ``False``)
If ``True``, the program is forked before drawing. This is used as a
work-around for a bug in graphviz, where the ``exit()`` function is
called, which would cause the calling program to end. This is always
assumed ``True``, if ``output_format == 'xlib'``.
return_string : bool (default: ``False``)
If ``True``, a string containing the rendered graph as binary data is
returned (defaults to png format).
Returns
-------
pos : :class:`~graph_tool.PropertyMap`
Vector vertex property map with the x and y coordinates of the vertices.
gv : gv.digraph or gv.graph (optional, only if ``returngv == True``)
Internally used graphviz graph.
Notes
-----
This function is a wrapper for the [graphviz]_ routines. Extensive additional
documentation for the graph, vertex and edge properties is available at:
http://www.graphviz.org/doc/info/attrs.html.
Examples
--------
.. testcode::
:hide:
np.random.seed(42)
gt.seed_rng(42)
from numpy import sqrt
>>> g = gt.price_network(1500)
>>> deg = g.degree_property_map("in")
>>> deg.a = 2 * (sqrt(deg.a) * 0.5 + 0.4)
>>> ebet = gt.betweenness(g)[1]
>>> gt.graphviz_draw(g, vcolor=deg, vorder=deg, elen=10,
... ecolor=ebet, eorder=ebet, output="graphviz-draw.pdf")
<...>
.. testcode::
:hide:
gt.graphviz_draw(g, vcolor=deg, vorder=deg, elen=10,
ecolor=ebet, eorder=ebet, output="graphviz-draw.png")
.. figure:: graphviz-draw.*
:align: center
Kamada-Kawai force-directed layout of a Price network with 1500
nodes. The vertex size and color indicate the degree, and the edge color
corresponds to the edge betweenness centrality.
References
----------
.. [graphviz] http://www.graphviz.org
"""
if output != "" and output is not None:
output = os.path.expanduser(output)
# check opening file for writing, since graphviz will bork if it is not
# possible to open file
if os.path.dirname(output) != "" and \
not os.access(os.path.dirname(output), os.W_OK):
raise IOError("cannot write to " + os.path.dirname(output))
has_layout = False
try:
if gv_new_api:
gvg = libgv.agopen("G".encode("utf8"),
libgv_directed if g.is_directed() else libgv_undirected,
None)
else:
gvg = libgv.agopen("G".encode("utf8"),
libgv_directed if g.is_directed() else libgv_undirected)
if layout is None:
if pin == False:
layout = "neato" if g.num_vertices() <= 1000 else "sfdp"
else:
layout = "neato"
if layout == "arf":
layout = "neato"
pos = arf_layout(g, pos=pos)
pin = True
if pos is not None:
# copy user-supplied property
if isinstance(pos, PropertyMap):
pos = ungroup_vector_property(pos, [0, 1])
else:
pos = (g.copy_property(pos[0]), g.copy_property(pos[1]))
if type(vsize) == tuple:
s = g.new_vertex_property("double")
g.copy_property(vsize[0], s)
s.a *= vsize[1]
vsize = s
if type(penwidth) == tuple:
s = g.new_edge_property("double")
g.copy_property(penwidth[0], s)
s.a *= penwidth[1]
penwidth = s
# main graph properties
aset(gvg, "outputorder", "edgesfirst")
aset(gvg, "mode", "major")
if type(overlap) is bool:
overlap = "true" if overlap else "false"
else:
overlap = str(overlap)
aset(gvg, "overlap", overlap)
if sep is not None:
aset(gvg, "sep", sep)
if splines:
aset(gvg, "splines", "true")
aset(gvg, "ratio", ratio)
# size is in centimeters... convert to inches
aset(gvg, "size", "%f,%f" % (size[0] / 2.54, size[1] / 2.54))
if maxiter is not None:
aset(gvg, "maxiter", maxiter)
seed = numpy.random.randint(sys.maxsize)
aset(gvg, "start", "%d" % seed)
# apply all user supplied graph properties
for k, val in gprops.items():
if isinstance(val, PropertyMap):
aset(gvg, k, val[g])
else:
aset(gvg, k, val)
# normalize color properties
if (isinstance(vcolor, PropertyMap) and
vcolor.value_type() != "string"):
minmax = [float("inf"), -float("inf")]
for v in g.vertices():
c = vcolor[v]
minmax[0] = min(c, minmax[0])
minmax[1] = max(c, minmax[1])
if minmax[0] == minmax[1]:
minmax[1] += 1
if vnorm:
vnorm = matplotlib.colors.Normalize(vmin=minmax[0], vmax=minmax[1])
else:
vnorm = lambda x: x
if (isinstance(ecolor, PropertyMap) and
ecolor.value_type() != "string"):
minmax = [float("inf"), -float("inf")]
for e in g.edges():
c = ecolor[e]
minmax[0] = min(c, minmax[0])
minmax[1] = max(c, minmax[1])
if minmax[0] == minmax[1]:
minmax[1] += 1
if enorm:
enorm = matplotlib.colors.Normalize(vmin=minmax[0],
vmax=minmax[1])
else:
enorm = lambda x: x
if vcmap is None:
vcmap = matplotlib.cm.jet
if ecmap is None:
ecmap = matplotlib.cm.jet
# add nodes
if vorder is not None:
vertices = sorted(g.vertices(), key = lambda a: vorder[a])
else:
vertices = g.vertices()
for v in vertices:
if gv_new_api:
n = libgv.agnode(gvg, str(int(v)).encode("utf8"))
else:
n = libgv.agnode(gvg, str(int(v)).encode("utf8"), True)
if type(vsize) == PropertyMap:
vw = vh = vsize[v]
else:
vw = vh = vsize
aset(n, "shape", "circle")
aset(n, "width", "%g" % vw)
aset(n, "height", "%g" % vh)
aset(n, "style", "filled")
aset(n, "color", "#2e3436")
# apply color
if isinstance(vcolor, str):
aset(n, "fillcolor", vcolor)
else:
color = vcolor[v]
if isinstance(color, str):
aset(n, "fillcolor", color)
else:
color = tuple([int(c * 255.0) for c in vcmap(vnorm(color))])
aset(n, "fillcolor", "#%.2x%.2x%.2x%.2x" % color)
aset(n, "label", "")
# user supplied position
if pos is not None:
if isinstance(pin, bool):
pin_val = pin
else:
pin_val = pin[v]
aset(n, "pos", "%f,%f%s" % (pos[0][v], pos[1][v],
"!" if pin_val else ""))
aset(n, "pin", pin_val)
# apply all user supplied properties
for k, val in vprops.items():
if isinstance(val, PropertyMap):
aset(n, k, val[v])
else:
aset(n, k, val)
# add edges
if eorder is not None:
edges = sorted(g.edges(), key = lambda a: eorder[a])
else:
edges = g.edges()
for e in edges:
if gv_new_api:
ge = libgv.agedge(gvg,
libgv.agnode(gvg, str(int(e.source())).encode("utf8"), False),
libgv.agnode(gvg, str(int(e.target())).encode("utf8"), False),
str(g.edge_index[e]).encode("utf8"), True)
else:
ge = libgv.agedge(gvg,
libgv.agnode(gvg, str(int(e.source())).encode("utf8")),
libgv.agnode(gvg, str(int(e.target())).encode("utf8")))
aset(ge, "arrowsize", "0.3")
if g.is_directed():
aset(ge, "arrowhead", "vee")
# apply color
if isinstance(ecolor, str):
aset(ge, "color", ecolor)
else:
color = ecolor[e]
if isinstance(color, str):
aset(ge, "color", color)
else:
color = tuple([int(c * 255.0) for c in ecmap(enorm(color))])
aset(ge, "color", "#%.2x%.2x%.2x%.2x" % color)
# apply edge length
if elen is not None:
if isinstance(elen, PropertyMap):
aset(ge, "len", elen[e])
else:
aset(ge, "len", elen)
# apply width
if penwidth is not None:
if isinstance(penwidth, PropertyMap):
aset(ge, "penwidth", penwidth[e])
else:
aset(ge, "penwidth", penwidth)
# apply all user supplied properties
for k, v in eprops.items():
if isinstance(v, PropertyMap):
aset(ge, k, v[e])
else:
aset(ge, k, v)
libgv.gvLayout(gvc, gvg, layout.encode("utf8"))
has_layout = True
retv = libgv.gvRender(gvc, gvg, "dot".encode("utf8"), None) # retrieve positions only
if pos is None:
pos = (g.new_vertex_property("double"),
g.new_vertex_property("double"))
for v in g.vertices():
n = libgv.agnode(gvg, str(int(v)).encode("utf8"))
p = aget(n, "pos")
p = p.split(",")
pos[0][v] = float(p[0])
pos[1][v] = float(p[1])
# I don't get this, but it seems necessary
pos[0].a /= 100
pos[1].a /= 100
pos = group_vector_property(pos)
if return_string:
if output_format == "auto":
output_format = "png"
if hasattr(libc, "open_memstream"):
buf = ctypes.c_char_p()
buf_len = ctypes.c_size_t()
fstream = libc.open_memstream(ctypes.byref(buf),
ctypes.byref(buf_len))
libgv.gvRender(gvc, gvg, output_format.encode("utf8"), fstream)
libc.fclose(fstream)
data = copy.copy(ctypes.string_at(buf, buf_len.value))
libc.free(buf)
else:
# write to temporary file, if open_memstream is not available
output = tempfile.mkstemp()[1]
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
data = open(output, "rb").read()
os.remove(output)
else:
if output_format == "auto":
if output == "":
output_format = "xlib"
elif output is not None:
output_format = output.split(".")[-1]
# if using xlib we need to fork the process, otherwise good ol'
# graphviz will call exit() when the window is closed
if output_format == "xlib" or fork:
pid = os.fork()
if pid == 0:
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
os._exit(0) # since we forked, it's good to be sure
if output_format != "xlib":
os.wait()
elif output is not None:
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
ret = [pos]
if return_string:
ret.append(data)
finally:
if has_layout:
libgv.gvFreeLayout(gvc, gvg)
libgv.agclose(gvg)
if len(ret) > 1:
return tuple(ret)
else:
return ret[0]
| apache-2.0 |
hajicj/MUSCIMarker | MUSCIMarker/syntax/dependency_parsers.py | 1 | 11720 | """This module implements a class that..."""
from __future__ import print_function, unicode_literals
from builtins import object
import collections
import logging
import numpy
from muscima.cropobject import cropobject_distance
from muscima.graph import find_beams_incoherent_with_stems
from muscima.inference_engine_constants import _CONST
from sklearn.feature_extraction import DictVectorizer
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
class SimpleDeterministicDependencyParser(object):
"""This dependency parser just adds all possible edges,
as defined by the given grammar."""
def __init__(self, grammar):
"""Initialize the parser.
:type grammar: DependencyGrammar
:param grammar: A DependencyGrammar.
"""
logging.info('SimpleDeterministicDependencyParser: Initializing parser.')
self.grammar = grammar
def parse(self, cropobjects):
"""Adds all edges allowed by the grammar, given the list
of symbols. The returned edges are (objid, objid) tuples referring to the
CropObjects whose symbol classes the grammar allows to be linked.
"""
symbol_names = [c.clsname for c in cropobjects]
symbol_name_idxs = self.get_all_possible_edges(symbol_names)
edges = [(cropobjects[i].objid, cropobjects[j].objid) for i, j in symbol_name_idxs]
return edges
def get_all_possible_edges(self, symbol_names):
"""Collects all symbol edges that are permissible, using the grammar.
:rtype: list
:returns: A list of ``(i, j)`` tuples, where ``i`` and ``j``
are indices into the list of symbol names (so that whoever
provided the names can track down the specific objects
from which the symbol names were collected - there is no
requirement that the names must be unique).
"""
edges = []
for i, s1 in enumerate(symbol_names):
for j, s2 in enumerate(symbol_names):
if self.grammar.is_head(s1, s2):
# No loops!
if i != j:
edges.append((i, j))
return edges
def set_grammar(self, grammar):
# More complex parsers might need to reset some internal states
self.grammar = grammar
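# Minimal usage sketch (not part of the original module); ``grammar`` is assumed
# to be a DependencyGrammar loaded elsewhere in MUSCIMarker and ``cropobjects``
# a list of CropObject instances:
#
#     parser = SimpleDeterministicDependencyParser(grammar)
#     edges = parser.parse(cropobjects)   # [(from_objid, to_objid), ...]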
class PairwiseClassificationParser(object):
"""This parser applies a simple classifier that takes the bounding
boxes of two CropObjects and their classes and returns whether there
is an edge or not."""
MAXIMUM_DISTANCE_THRESHOLD = 200
def __init__(self, grammar, clf, cropobject_feature_extractor):
self.grammar = grammar
self.clf = clf
self.extractor = cropobject_feature_extractor
def parse(self, cropobjects):
# Ensure the same docname for all cropobjects,
# since we later compute their distances.
# The correct docname gets set on export anyway.
default_doc = cropobjects[0].doc
for c in cropobjects:
c.set_doc(default_doc)
pairs, features = self.extract_all_pairs(cropobjects)
logging.info('Clf.Parse: {0} object pairs from {1} objects'.format(len(pairs), len(cropobjects)))
preds = self.clf.predict(features)
edges = []
for idx, (c_from, c_to) in enumerate(pairs):
if preds[idx] != 0:
edges.append((c_from.objid, c_to.objid))
edges = self._apply_trivial_fixes(cropobjects, edges)
return edges
def _apply_trivial_fixes(self, cropobjects, edges):
edges = self._only_one_stem_per_notehead(cropobjects, edges)
edges = self._every_full_notehead_has_a_stem(cropobjects, edges)
return edges
def _only_one_stem_per_notehead(self, cropobjects, edges):
_cdict = {c.objid: c for c in cropobjects}
# Collect stems per notehead
stems_per_notehead = collections.defaultdict(list)
stem_objids = set()
for f_objid, t_objid in edges:
f = _cdict[f_objid]
t = _cdict[t_objid]
if (f.clsname in _CONST.NOTEHEAD_CLSNAMES) and \
(t.clsname == 'stem'):
stems_per_notehead[f_objid].append(t_objid)
stem_objids.add(t_objid)
# Pick the closest one (by minimum distance)
closest_stems_per_notehead = dict()
for n_objid in stems_per_notehead:
n = _cdict[n_objid]
stems = [_cdict[objid] for objid in stems_per_notehead[n_objid]]
closest_stem = min(stems, key=lambda s: cropobject_distance(n, s))
closest_stems_per_notehead[n_objid] = closest_stem.objid
# Filter the edges
edges = [(f_objid, t_objid) for f_objid, t_objid in edges
if (f_objid not in closest_stems_per_notehead) or
(t_objid not in stem_objids) or
(closest_stems_per_notehead[f_objid] == t_objid)]
return edges
def _every_full_notehead_has_a_stem(self, cropobjects, edges):
_cdict = {c.objid: c for c in cropobjects}
# Collect stems per notehead
notehead_objids = set([c.objid for c in cropobjects if c.clsname == 'notehead-full'])
stem_objids = set([c.objid for c in cropobjects if c.clsname == 'stem'])
noteheads_with_stem_objids = set()
stems_with_notehead_objids = set()
for f, t in edges:
if _cdict[f].clsname == 'notehead-full':
if _cdict[t].clsname == 'stem':
noteheads_with_stem_objids.add(f)
stems_with_notehead_objids.add(t)
noteheads_without_stems = {n: _cdict[n] for n in notehead_objids
if n not in noteheads_with_stem_objids}
stems_without_noteheads = {n: _cdict[n] for n in stem_objids
if n not in stems_with_notehead_objids}
# To each notehead, assign the closest stem that is not yet taken.
closest_stem_per_notehead = {objid: min(stems_without_noteheads,
key=lambda x: cropobject_distance(_cdict[x], n))
for objid, n in list(noteheads_without_stems.items())}
# Filter edges that are too long
_n_before_filter = len(closest_stem_per_notehead)
closest_stem_threshold_distance = 80
closest_stem_per_notehead = {n_objid: s_objid
for n_objid, s_objid in list(closest_stem_per_notehead.items())
if cropobject_distance(_cdict[n_objid],
_cdict[s_objid])
< closest_stem_threshold_distance
}
return edges + list(closest_stem_per_notehead.items())
def extract_all_pairs(self, cropobjects):
pairs = []
features = []
for u in cropobjects:
for v in cropobjects:
if u.objid == v.objid:
continue
distance = cropobject_distance(u, v)
if distance < self.MAXIMUM_DISTANCE_THRESHOLD:
pairs.append((u, v))
f = self.extractor(u, v)
features.append(f)
# logging.info('Parsing features: {0}'.format(features[0]))
features = numpy.array(features)
# logging.info('Parsing features: {0}/{1}'.format(features.shape, features))
return pairs, features
def is_edge(self, c_from, c_to):
features = self.extractor(c_from, c_to)
result = self.clf.predict(features)
return result
def set_grammar(self, grammar):
self.grammar = grammar
##############################################################################
# Feature extraction
class PairwiseClfFeatureExtractor(object):
def __init__(self, vectorizer=None):
"""Initialize the feature extractor.
:param vectorizer: A DictVectorizer() from scikit-learn.
Used to convert feature dicts to the vectors that
the edge classifier of the parser will expect.
If None, will create a new DictVectorizer. (This is useful
for training; you can then pickle the entire extractor
and make sure the feature extraction works for the classifier
at runtime.)
"""
if vectorizer is None:
vectorizer = DictVectorizer()
self.vectorizer = vectorizer
def __call__(self, *args, **kwargs):
"""The call is per item (in this case, CropObject pair)."""
fd = self.get_features_relative_bbox_and_clsname(*args, **kwargs)
# Compensate for the vectorizer "target" feature, which we don't have here (by :-1)
item_features = self.vectorizer.transform(fd).toarray()[0, :-1]
return item_features
def get_features_relative_bbox_and_clsname(self, c_from, c_to):
"""Extract a feature vector from the given pair of CropObjects.
Does *NOT* convert the class names to integers.
Features: bbox(c_to) - bbox(c_from), clsname(c_from), clsname(c_to)
Target: 1 if there is a link from u to v
Returns a dict that works as input to ``self.vectorizer``.
"""
target = 0
if c_from.doc == c_to.doc:
if c_to.objid in c_from.outlinks:
target = 1
features = (c_to.top - c_from.top,
c_to.left - c_from.left,
c_to.bottom - c_from.bottom,
c_to.right - c_from.right,
c_from.clsname,
c_to.clsname,
target)
dt, dl, db, dr, cu, cv, tgt = features
# Normalizing clsnames
if cu.startswith('letter'): cu = 'letter'
if cu.startswith('numeral'): cu = 'numeral'
if cv.startswith('letter'): cv = 'letter'
if cv.startswith('numeral'): cv = 'numeral'
feature_dict = {'dt': dt,
'dl': dl,
'db': db,
'dr': dr,
'cls_from': cu,
'cls_to': cv,
'target': tgt}
return feature_dict
def get_features_distance_relative_bbox_and_clsname(self, c_from, c_to):
"""Extract a feature vector from the given pair of CropObjects.
Does *NOT* convert the class names to integers.
Features: bbox(c_to) - bbox(c_from), clsname(c_from), clsname(c_to)
Target: 1 if there is a link from u to v
Returns a tuple.
"""
target = 0
if c_from.doc == c_to.doc:
if c_to.objid in c_from.outlinks:
target = 1
distance = cropobject_distance(c_from, c_to)
features = (distance,
c_to.top - c_from.top,
c_to.left - c_from.left,
c_to.bottom - c_from.bottom,
c_to.right - c_from.right,
c_from.clsname,
c_to.clsname,
target)
dist, dt, dl, db, dr, cu, cv, tgt = features
if cu.startswith('letter'): cu = 'letter'
if cu.startswith('numeral'): cu = 'numeral'
if cv.startswith('letter'): cv = 'letter'
if cv.startswith('numeral'): cv = 'numeral'
feature_dict = {'dist': dist,
'dt': dt,
'dl': dl,
'db': db,
'dr': dr,
'cls_from': cu,
'cls_to': cv,
'target': tgt}
return feature_dict
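# Hedged training-time sketch (not part of the original module; the candidate
# pair generation and the classifier choice are assumptions):
#
#     extractor = PairwiseClfFeatureExtractor()           # owns a fresh DictVectorizer
#     fdicts = [extractor.get_features_relative_bbox_and_clsname(u, v)
#               for u, v in candidate_pairs]              # candidate_pairs: hypothetical
#     X = extractor.vectorizer.fit_transform(fdicts).toarray()
#     # train any scikit-learn classifier on X without the 'target' column,
#     # then wire it up: PairwiseClassificationParser(grammar, clf, extractor)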
| apache-2.0 |
elkingtonmcb/scikit-learn | sklearn/linear_model/sag.py | 64 | 9815 | """Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import numpy as np
import warnings
from ..utils import ConvergenceWarning
from ..utils import check_array
from .base import make_dataset
from .sgd_fast import Log, SquaredLoss
from .sag_fast import sag, get_max_squared_sum
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
"""Compute automatic step size for SAG solver
For the squared loss the step size is set to 1 / (L + fit_intercept + alpha_scaled),
and for the log loss to 4 / (L + fit_intercept + 4 * alpha_scaled), where L is
the maximum squared sum of X over samples.
Parameters
----------
max_squared_sum : float
Maximum squared sum of X over samples.
alpha_scaled : float
Constant that multiplies the regularization term, scaled by
1. / n_samples, the number of samples.
loss : string, in {"log", "squared"}
The loss function used in SAG solver.
fit_intercept : bool
Specifies if a constant (a.k.a. bias or intercept) will be
added to the decision function.
Returns
-------
step_size : float
Step size used in SAG solver.
"""
if loss == 'log':
# inverse Lipschitz constant for log loss
return 4.0 / (max_squared_sum + int(fit_intercept)
+ 4.0 * alpha_scaled)
elif loss == 'squared':
# inverse Lipschitz constant for squared loss
return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
else:
raise ValueError("Unknown loss function for SAG solver, got %s "
"instead of 'log' or 'squared'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
max_iter=1000, tol=0.001, verbose=0, random_state=None,
check_input=True, max_squared_sum=None,
warm_start_mem=dict()):
"""SAG solver for Ridge and LogisticRegression
SAG stands for Stochastic Average Gradient: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a constant learning rate.
IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
same scale. You can normalize the data by using
sklearn.preprocessing.StandardScaler on your data before passing it to the
fit method.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values for the features. It will
fit the data according to squared loss or log loss.
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using the squared euclidean norm L2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
loss : 'log' | 'squared'
Loss function that will be optimized.
'log' is used for classification, like in LogisticRegression.
'squared' is used for regression, like in Ridge.
alpha : float, optional
Constant that multiplies the regularization term. Defaults to 1.
max_iter: int, optional
The max number of passes over the training data if the stopping
criterion is not reached. Defaults to 1000.
tol: double, optional
The stopping criterion for the weights. The iterations will stop when
max(change in weights) / max(weights) < tol. Defaults to .001
verbose: integer, optional
The verbosity level.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. If None, it will be computed,
going through all the samples. The value should be precomputed
to speed up cross validation.
warm_start_mem: dict, optional
The initialization parameters used for warm starting. It is currently
not used in Ridge.
Returns
-------
coef_ : array, shape (n_features)
Weight vector.
n_iter_ : int
The number of full pass on all samples.
warm_start_mem : dict
Contains a 'coef' key with the fitted result, and eventually the
fitted intercept at the end of the array. Contains also other keys
used for warm starting.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> X = np.random.randn(n_samples, n_features)
>>> y = np.random.randn(n_samples)
>>> clf = linear_model.Ridge(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='sag', tol=0.001)
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = linear_model.LogisticRegression(solver='sag')
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
LogisticRegression(C=1.0, class_weight=None, dual=False,
fit_intercept=True, intercept_scaling=1, max_iter=100,
multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
solver='sag', tol=0.0001, verbose=0, warm_start=False)
References
----------
Schmidt, M., Roux, N. L., & Bach, F. (2013).
Minimizing finite sums with the stochastic average gradient
https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
See also
--------
Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
LogisticRegression, SGDClassifier, LinearSVC, Perceptron
"""
# Ridge default max_iter is None
if max_iter is None:
max_iter = 1000
if check_input:
X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')
n_samples, n_features = X.shape[0], X.shape[1]
# As in SGD, the alpha is scaled by n_samples.
alpha_scaled = float(alpha) / n_samples
# initialization
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
if 'coef' in warm_start_mem.keys():
coef_init = warm_start_mem['coef']
else:
coef_init = np.zeros(n_features, dtype=np.float64, order='C')
# coef_init contains possibly the intercept_init at the end.
# Note that Ridge centers the data before fitting, so fit_intercept=False.
fit_intercept = coef_init.size == (n_features + 1)
if fit_intercept:
intercept_init = coef_init[-1]
coef_init = coef_init[:-1]
else:
intercept_init = 0.0
if 'intercept_sum_gradient' in warm_start_mem.keys():
intercept_sum_gradient_init = warm_start_mem['intercept_sum_gradient']
else:
intercept_sum_gradient_init = 0.0
if 'gradient_memory' in warm_start_mem.keys():
gradient_memory_init = warm_start_mem['gradient_memory']
else:
gradient_memory_init = np.zeros(n_samples, dtype=np.float64,
order='C')
if 'sum_gradient' in warm_start_mem.keys():
sum_gradient_init = warm_start_mem['sum_gradient']
else:
sum_gradient_init = np.zeros(n_features, dtype=np.float64, order='C')
if 'seen' in warm_start_mem.keys():
seen_init = warm_start_mem['seen']
else:
seen_init = np.zeros(n_samples, dtype=np.int32, order='C')
if 'num_seen' in warm_start_mem.keys():
num_seen_init = warm_start_mem['num_seen']
else:
num_seen_init = 0
dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)
if max_squared_sum is None:
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
fit_intercept)
if step_size * alpha_scaled == 1:
raise ZeroDivisionError("Current sag implementation does not handle "
"the case step_size * alpha_scaled == 1")
if loss == 'log':
class_loss = Log()
elif loss == 'squared':
class_loss = SquaredLoss()
else:
raise ValueError("Invalid loss parameter: got %r instead of "
"one of ('log', 'squared')" % loss)
intercept_, num_seen, n_iter_, intercept_sum_gradient = \
sag(dataset, coef_init.ravel(),
intercept_init, n_samples,
n_features, tol,
max_iter,
class_loss,
step_size, alpha_scaled,
sum_gradient_init.ravel(),
gradient_memory_init.ravel(),
seen_init.ravel(),
num_seen_init,
fit_intercept,
intercept_sum_gradient_init,
intercept_decay,
verbose)
if n_iter_ == max_iter:
warnings.warn("The max_iter was reached which means "
"the coef_ did not converge", ConvergenceWarning)
coef_ = coef_init
if fit_intercept:
coef_ = np.append(coef_, intercept_)
warm_start_mem = {'coef': coef_, 'sum_gradient': sum_gradient_init,
'intercept_sum_gradient': intercept_sum_gradient,
'gradient_memory': gradient_memory_init,
'seen': seen_init, 'num_seen': num_seen}
return coef_, n_iter_, warm_start_mem
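# Hedged direct-call sketch (not part of the original module; in practice
# sag_solver is driven by Ridge / LogisticRegression with solver='sag'):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 3)
#     y = np.dot(X, np.array([1.0, -2.0, 0.5])) + 0.1 * rng.randn(50)
#     coef, n_iter, mem = sag_solver(X, y, loss='squared', alpha=1.0)
#     # ``mem`` can be fed back through warm_start_mem to resume optimization.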
| bsd-3-clause |
decvalts/cartopy | lib/cartopy/examples/eyja_volcano.py | 1 | 1831 | # -*- coding: utf-8 -*-
"""
Map tile acquisition
--------------------
Demonstrates cartopy's ability to draw map tiles which are downloaded on
demand from the Stamen tile server. Internally these tiles are then combined
into a single image and displayed in the cartopy GeoAxes.
"""
__tags__ = ["Scalar data"]
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
def main():
# Create a Stamen Terrain instance.
stamen_terrain = cimgt.StamenTerrain()
fig = plt.figure()
# Create a GeoAxes in the tile's projection.
ax = fig.add_subplot(1, 1, 1, projection=stamen_terrain.crs)
# Limit the extent of the map to a small longitude/latitude range.
ax.set_extent([-22, -15, 63, 65], crs=ccrs.Geodetic())
# Add the Stamen data at zoom level 8.
ax.add_image(stamen_terrain, 8)
# Add a marker for the Eyjafjallajökull volcano.
ax.plot(-19.613333, 63.62, marker='o', color='red', markersize=12,
alpha=0.7, transform=ccrs.Geodetic())
# Use the cartopy interface to create a matplotlib transform object
# for the Geodetic coordinate system. We will use this along with
# matplotlib's offset_copy function to define a coordinate system which
# translates the text by 25 pixels to the left.
geodetic_transform = ccrs.Geodetic()._as_mpl_transform(ax)
text_transform = offset_copy(geodetic_transform, units='dots', x=-25)
# Add text 25 pixels to the left of the volcano.
ax.text(-19.613333, 63.62, u'Eyjafjallajökull',
verticalalignment='center', horizontalalignment='right',
transform=text_transform,
bbox=dict(facecolor='sandybrown', alpha=0.5, boxstyle='round'))
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
davidam/python-examples | scikit/text-classifier-example.py | 1 | 2200 | import sklearn
import numpy as np
from glob import glob
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.pipeline import Pipeline
def pathfinder(targetPath):
path_string = targetPath.replace('/path/to/project/folder/','')
path_string = path_string.strip('/')
return path_string
# Get paths to labelled data
rawFolderPaths = glob("/path/to/project/folder/*/")
print ('\nGathering labelled categories...\n')
categories = []
# Extract the folder paths, reduce down to the label and append to the categories list
for i in rawFolderPaths:
category = pathfinder(i)
categories.append(category)
# Load the data
print ('\nLoading the dataset...\n')
docs_to_train = sklearn.datasets.load_files("/Users/danielhoadley/PycharmProjects/trainer/!labelled_data_reportXML",
description=None, categories=categories, load_content=True,
encoding='utf-8', shuffle=True, random_state=42)
# Split the dataset into training and testing sets
print ('\nBuilding out hold-out test sample...\n')
X_train, X_test, y_train, y_test = train_test_split(docs_to_train.data, docs_to_train.target, test_size=0.4)
# Construct the classifier pipeline using a SGDClassifier algorithm
print ('\nApplying the classifier...\n')
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer(use_idf=True)),
('clf', SGDClassifier(loss='hinge', penalty='l2',
alpha=1e-3, random_state=42, verbose=1)),
])
# Fit the model to the training data
text_clf.fit(X_train, y_train)
# Run the test data into the model
predicted = text_clf.predict(X_test)
# Calculate mean accuracy of predictions
print (np.mean(predicted == y_test))
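# A hedged follow-up (not in the original example): the fitted pipeline can
# label unseen text directly, e.g.
#
#     text_clf.predict(['some new unlabelled document'])
#
# which returns integer labels that map back to docs_to_train.target_names.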
# Generate labelled performance metrics
print(metrics.classification_report(y_test, predicted,
target_names=docs_to_train.target_names)) | gpl-3.0 |
dhhjx880713/GPy | doc/source/conf.py | 4 | 12135 | # -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 18 18:16:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#for p in os.walk('../../GPy'):
# sys.path.append(p[0])
sys.path.insert(0, os.path.abspath('../../'))
#sys.path.insert(0, os.path.abspath('../../GPy/'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
#on_rtd = True
if on_rtd:
# sys.path.append(os.path.abspath('../GPy'))
import subprocess
proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
proc = subprocess.Popen("ls ../../", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#Lets regenerate our rst files from the source, -P adds private modules (i.e kern._src)
proc = subprocess.Popen("sphinx-apidoc -P -f -o . ../../GPy", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "program output:", out
#proc = subprocess.Popen("whereis numpy", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
#proc = subprocess.Popen("whereis matplotlib", stdout=subprocess.PIPE, shell=True)
#(out, err) = proc.communicate()
#print "program output:", out
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
#----- Autodoc
#import sys
#try:
# from unittest.mock import MagicMock
#except:
# from mock import Mock as MagicMock
#
#class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
#
MOCK_MODULES = ['scipy.linalg.blas', 'blas', 'scipy.optimize', 'scipy.optimize.linesearch', 'scipy.linalg',
'scipy', 'scipy.special', 'scipy.integrate', 'scipy.io', 'scipy.stats',
'sympy', 'sympy.utilities.iterables', 'sympy.utilities.lambdify',
'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache',
'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser',
'nose', 'nose.tools'
]
autodoc_mock_imports = MOCK_MODULES
#
#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
#
import sphinx_rtd_theme
autodoc_default_flags = ['members',
#'undoc-members',
#'private-members',
#'special-members',
#'inherited-members',
'show-inheritance']
autodoc_member_order = 'groupwise'
add_function_parentheses = False
add_module_names = False
#modindex_common_prefix = ['GPy']
show_authors = True
# ------ Sphinx
# Add any paths that contain templates here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
#author = u'`Humans <https://github.com/SheffieldML/GPy/graphs/contributors>`_'
author = 'GPy Authors, see https://github.com/SheffieldML/GPy/graphs/contributors'
copyright = u'2015, '+author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../../GPy/__version__.py', 'r') as f:
version = f.read()
release = version
print version
# version = '0.8.8'
# The full version, including alpha/beta/rc tags.
# release = '0.8.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = dict(sidebarwidth='20')
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
# '**': ['globaltoc.html', 'localtoc.html', 'sourcelink.html', 'searchbox.html'],
# 'using/windows': ['windowssidebar.html', 'searchbox.html'],
#}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = False
# If false, no index is generated.
#html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output ---------------------------------------------
#latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
#}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
#latex_documents = [
# (master_doc, 'GPy.tex', u'GPy Documentation',
# u'GPy Authors', 'manual'),
#]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
#man_pages = [
# (master_doc, 'gpy', u'GPy Documentation',
# [author], 1)
#]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
#texinfo_documents = [
# (master_doc, 'GPy', u'GPy Documentation',
# author, 'GPy', 'One line description of project.',
# 'Miscellaneous'),
#]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/tests/test_common.py | 70 | 7717 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
# Test that all estimators of type which are non-transformer
# and which have an attribute of max_iter, return the attribute
# of n_iter atleast 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
chenyyx/scikit-learn-doc-zh | examples/en/feature_selection/plot_feature_selection.py | 53 | 2840 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non-informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but
also selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
# #############################################################################
# Import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated with the target
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
# #############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
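# -log10 turns small p-values into large scores; dividing by the maximum
# rescales them to [0, 1] so they share an axis with the SVM weights below.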
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='darkorange',
edgecolor='black')
# #############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
color='navy', edgecolor='black')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='c',
edgecolor='black')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| gpl-3.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/core/internals/concat.py | 1 | 20709 | from collections import defaultdict
import copy
from typing import TYPE_CHECKING, Any, Dict, List, Sequence, Tuple, cast
import numpy as np
from pandas._libs import NaT, internals as libinternals
from pandas._typing import DtypeObj, Shape
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
get_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_float_dtype,
is_numeric_dtype,
is_sparse,
is_timedelta64_dtype,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.missing import isna_all
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, ExtensionArray
from pandas.core.internals.blocks import make_block
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas.core.arrays.sparse.dtype import SparseDtype
def concatenate_block_managers(
mgrs_indexers, axes, concat_axis: int, copy: bool
) -> BlockManager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
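    # concat_plan is a sequence of (placement, [JoinUnit, ...]) pairs, one per
    # output block, combining the per-manager plans along the concat axis.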
blocks = []
for placement, join_units in concat_plan:
if len(join_units) == 1 and not join_units[0].indexers:
b = join_units[0].block
values = b.values
if copy:
values = values.copy()
else:
values = values.view()
b = b.make_block_same_class(values, placement=placement)
elif _is_uniform_join_units(join_units):
blk = join_units[0].block
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
values = concat_compat(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals)
if not isinstance(values, ExtensionArray):
values = values.reshape(1, len(values))
b = make_block(values, placement=placement, ndim=blk.ndim)
else:
b = make_block(
_concatenate_join_units(join_units, concat_axis, copy=copy),
placement=placement,
ndim=len(axes),
)
blocks.append(b)
return BlockManager(blocks, axes)
def _get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
    # Calculate post-reindex shape, save for the item axis, which will be
    # handled separately for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr.blklocs, ax0_indexer, fill_value=-1)
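        # positions where ax0_indexer is -1 (items missing after the reindex)
        # are marked with -1 here and become all-NA JoinUnits further below.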
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
if self.block is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return self.block.dtype
else:
return get_dtype(maybe_promote(self.block.dtype, self.block.fill_value)[0])
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
        # Usually it's enough to check only a small fraction of values to see
        # whether a block is NOT null; chunking should help in such cases. The
        # value 1000 was chosen rather arbitrarily.
values = self.block.values
if is_sparse(self.block.values.dtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na):
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_na:
if getattr(self.block, "is_object", False):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if getattr(self.block, "is_datetimetz", False) or is_datetime64tz_dtype(
empty_dtype
):
if self.block is None:
# TODO(EA2D): special case unneeded with 2D EAs
return DatetimeArray(
np.full(self.shape[1], fill_value.value), dtype=empty_dtype
)
elif getattr(self.block, "is_categorical", False):
pass
elif getattr(self.block, "is_extension", False):
pass
elif is_extension_array_dtype(empty_dtype):
missing_arr = empty_dtype.construct_array_type()._from_sequence(
[], dtype=empty_dtype
)
ncols, nrows = self.shape
assert ncols == 1, ncols
empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
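                # take() with allow_fill=True treats every -1 position as
                # missing, producing an all-missing array of length nrows
                # filled with fill_value.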
return missing_arr.take(
empty_arr, allow_fill=True, fill_value=fill_value
)
else:
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.values
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax, fill_value=fill_value)
return values
def _concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = _get_empty_dtype_and_na(join_units)
to_concat = [
ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
for ju in join_units
]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(isinstance(t, ExtensionArray) for t in to_concat):
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat]
concat_values = concat_compat(to_concat, axis=0)
if not isinstance(concat_values, ExtensionArray) or (
isinstance(concat_values, DatetimeArray) and concat_values.tz is None
):
# if the result of concat is not an EA but an ndarray, reshape to
# 2D to put it a non-EA Block
# special case DatetimeArray, which *is* an EA, but is put in a
# consolidated 2D block
concat_values = np.atleast_2d(concat_values)
else:
concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
def _get_empty_dtype_and_na(join_units: Sequence[JoinUnit]) -> Tuple[DtypeObj, Any]:
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
na
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.dtype(np.float64), np.nan
if _is_uniform_reindex(join_units):
# FIXME: integrate property
empty_dtype = join_units[0].block.dtype
upcasted_na = join_units[0].block.fill_value
return empty_dtype, upcasted_na
has_none_blocks = False
dtypes = [None] * len(join_units)
for i, unit in enumerate(join_units):
if unit.block is None:
has_none_blocks = True
else:
dtypes[i] = unit.dtype
upcast_classes = _get_upcast_classes(join_units, dtypes)
# TODO: de-duplicate with maybe_promote?
# create the result
if "extension" in upcast_classes:
if len(upcast_classes) == 1:
cls = upcast_classes["extension"][0]
return cls, cls.na_value
else:
return np.dtype("object"), np.nan
elif "object" in upcast_classes:
return np.dtype(np.object_), np.nan
elif "bool" in upcast_classes:
if has_none_blocks:
return np.dtype(np.object_), np.nan
else:
return np.dtype(np.bool_), None
elif "category" in upcast_classes:
return np.dtype(np.object_), np.nan
elif "datetimetz" in upcast_classes:
# GH-25014. We use NaT instead of iNaT, since this eventually
# ends up in DatetimeArray.take, which does not allow iNaT.
dtype = upcast_classes["datetimetz"]
return dtype[0], NaT
elif "datetime" in upcast_classes:
return np.dtype("M8[ns]"), np.datetime64("NaT", "ns")
elif "timedelta" in upcast_classes:
return np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")
else: # pragma
try:
common_dtype = np.find_common_type(upcast_classes, [])
except TypeError:
# At least one is an ExtensionArray
return np.dtype(np.object_), np.nan
else:
if is_float_dtype(common_dtype):
return common_dtype, common_dtype.type(np.nan)
elif is_numeric_dtype(common_dtype):
if has_none_blocks:
return np.dtype(np.float64), np.nan
else:
return common_dtype, None
msg = "invalid dtype determination in get_concat_dtype"
raise AssertionError(msg)
def _get_upcast_classes(
join_units: Sequence[JoinUnit],
dtypes: Sequence[DtypeObj],
) -> Dict[str, List[DtypeObj]]:
"""Create mapping between upcast class names and lists of dtypes."""
upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)
null_upcast_classes: Dict[str, List[DtypeObj]] = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
upcast_cls = _select_upcast_cls_from_dtype(dtype)
# Null blocks should not influence upcast class selection, unless there
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_na:
null_upcast_classes[upcast_cls].append(dtype)
else:
upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
return upcast_classes
def _select_upcast_cls_from_dtype(dtype: DtypeObj) -> str:
"""Select upcast class name based on dtype."""
if is_categorical_dtype(dtype):
return "category"
elif is_datetime64tz_dtype(dtype):
return "datetimetz"
elif is_extension_array_dtype(dtype):
return "extension"
elif issubclass(dtype.type, np.bool_):
return "bool"
elif issubclass(dtype.type, np.object_):
return "object"
elif is_datetime64_dtype(dtype):
return "datetime"
elif is_timedelta64_dtype(dtype):
return "timedelta"
elif is_sparse(dtype):
dtype = cast("SparseDtype", dtype)
return dtype.subtype.name
elif is_float_dtype(dtype) or is_numeric_dtype(dtype):
return dtype.name
else:
return "float"
def _is_uniform_join_units(join_units: List[JoinUnit]) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
_concatenate_join_units (which uses `concat_compat`).
"""
# TODO: require dtype match in addition to same type? e.g. DatetimeTZBlock
# cannot necessarily join
return (
# all blocks need to have the same type
all(type(ju.block) is type(join_units[0].block) for ju in join_units) # noqa
and
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units)
and
# only use this path when there is something to concatenate
len(join_units) > 1
)
def _is_uniform_reindex(join_units) -> bool:
return (
# TODO: should this be ju.block._can_hold_na?
all(ju.block and ju.block.is_extension for ju in join_units)
and len({ju.block.dtype.name for ju in join_units}) == 1
)
def _trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
def _combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# _trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
| gpl-2.0 |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
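        # backends given as 'module://pkg.mod' are imported via that dotted
        # path directly, instead of the 'backend_<name>' convention used below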
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
| agpl-3.0 |
justacec/bokeh | bokeh/charts/glyphs.py | 3 | 36943 | from __future__ import absolute_import, division
from collections import defaultdict
import numpy as np
import pandas as pd
from six import iteritems
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Rect, Segment, Line, Patches, Arc
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import (Float, String, Datetime, Bool, Instance,
List, Either, Int, Enum, Color, Override, Any, Angle)
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import (Stat, Quantile, Sum, Min, Max, Bins, stats, Histogram,
BinnedStat)
from .data_source import ChartDataSource
from .utils import marker_types, generate_patch_base, label_from_index_dict
class NestedCompositeGlyph(CompositeGlyph):
"""A composite glyph that consists of other composite glyphs.
An important responsibility of any `CompositeGlyph` is to understand the bounds
of the glyph renderers that make it up. This class is used to provide convenient
properties that return the bounds from the child `CompositeGlyphs`.
"""
children = List(Instance(CompositeGlyph))
@property
def y_max(self):
return max([renderer.y_max for renderer in self.children])
@property
def y_min(self):
return min([renderer.y_min for renderer in self.children])
@property
def x_min(self):
return min([renderer.x_min for renderer in self.children])
@property
def x_max(self):
return max([renderer.x_max for renderer in self.children])
class XyGlyph(CompositeGlyph):
"""Composite glyph that plots in cartesian coordinates."""
x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
def build_source(self):
labels = self._build_label_array(('x', 'y'), self.label)
str_labels = [str(label) for label in labels]
if self.x is None:
data = dict(x_values=str_labels, y_values=self.y)
elif self.y is None:
data = dict(x_values=self.x, y_values=str_labels)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def _build_label_array(self, props, value):
for prop in props:
if getattr(self, prop) is not None:
return [value] * len(getattr(self, prop))
@property
def x_max(self):
# TODO(fpliger): since CompositeGlyphs are not exposed in general we
# should expect to always have a Series but in case
# it's not we just use the default min/max instead
# of just failing. When/If we end up exposing
# CompositeGlyphs we should consider making this
# more robust (either enforcing data or checking)
try:
return self.source.data['x_values'].max()
except AttributeError:
return max(self.source.data['x_values'])
@property
def x_min(self):
try:
return self.source.data['x_values'].min()
except AttributeError:
return min(self.source.data['x_values'])
@property
def y_max(self):
try:
return self.source.data['y_values'].max()
except AttributeError:
return max(self.source.data['y_values'])
@property
def y_min(self):
try:
return self.source.data['y_values'].min()
except AttributeError:
return min(self.source.data['y_values'])
class PointGlyph(XyGlyph):
"""A set of glyphs placed in x,y coordinates with the same attributes."""
fill_color = Override(default=DEFAULT_PALETTE[1])
fill_alpha = Override(default=0.7)
marker = String(default='circle')
size = Float(default=8)
def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
marker=None, size=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if marker is not None: kwargs['marker'] = marker
if size is not None: kwargs['size'] = size
if color:
line_color = color
fill_color = color
kwargs['line_color'] = line_color
kwargs['fill_color'] = fill_color
super(PointGlyph, self).__init__(**kwargs)
self.setup()
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
glyph_type = self.get_glyph()
glyph = glyph_type(x='x_values', y='y_values',
line_color=self.line_color,
fill_color=self.fill_color,
size=self.size,
fill_alpha=self.fill_alpha,
line_alpha=self.line_alpha)
yield GlyphRenderer(glyph=glyph)
class LineGlyph(XyGlyph):
"""Represents a group of data as a line."""
width = Int(default=2)
dash = Enum(DashPattern, default='solid')
def __init__(self, x=None, y=None, color=None, line_color=None,
width=None, dash=None, **kwargs):
kwargs['x'] = x
kwargs['y'] = y
if color is not None and line_color is None:
line_color = color
if dash is not None:
kwargs['dash'] = dash
if width is not None:
kwargs['width'] = width
if line_color is not None:
kwargs['line_color'] = line_color
super(LineGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
if self.x is None:
x = self.y.index
data = dict(x_values=x, y_values=self.y)
elif self.y is None:
y = self.x.index
data = dict(x_values=self.x, y_values=y)
else:
data = dict(x_values=self.x, y_values=self.y)
return data
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Line(x='x_values', y='y_values',
line_color=self.line_color,
line_alpha=self.line_alpha,
line_width=self.width,
line_dash=self.dash)
yield GlyphRenderer(glyph=glyph)
class AreaGlyph(LineGlyph):
# ToDo: should these be added to composite glyph?
stack = Bool(default=False)
dodge = Bool(default=False)
base = Float(default=0.0, help="""Lower bound of area.""")
def __init__(self, **kwargs):
line_color = kwargs.get('line_color', None)
fill_color = kwargs.get('fill_color', None)
color = kwargs.get('color', None)
if color is not None:
# apply color to line and fill
kwargs['fill_color'] = color
kwargs['line_color'] = color
elif line_color is not None and fill_color is None:
# apply line color to fill color by default
kwargs['fill_color'] = line_color
super(AreaGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
data = super(AreaGlyph, self).build_source()
x0, y0 = generate_patch_base(pd.Series(list(data['x_values'])),
pd.Series(list(data['y_values'])))
data['x_values'] = [x0]
data['y_values'] = [y0]
return data
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color=self.fill_color,
line_color=self.line_color
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
def __stack__(self, glyphs):
# ToDo: need to handle case of non-aligned indices, see pandas concat
# ToDo: need to address how to aggregate on an index when required
# build a list of series
areas = []
for glyph in glyphs:
areas.append(pd.Series(glyph.source.data['y_values'][0],
index=glyph.source.data['x_values'][0]))
# concat the list of indexed y values into dataframe
df = pd.concat(areas, axis=1)
# calculate stacked values along the rows
stacked_df = df.cumsum(axis=1)
# lower bounds of each area series are diff between stacked and orig values
lower_bounds = stacked_df - df
# reverse the df so the patch is drawn in correct order
lower_bounds = lower_bounds.iloc[::-1]
# concat the upper and lower bounds together
stacked_df = pd.concat([stacked_df, lower_bounds])
# update the data in the glyphs
for i, glyph in enumerate(glyphs):
glyph.source.data['x_values'] = [stacked_df.index.values]
glyph.source.data['y_values'] = [stacked_df.ix[:, i].values]
def get_nested_extent(self, col, func):
return [getattr(arr, func)() for arr in self.source.data[col]]
@property
def x_max(self):
return max(self.get_nested_extent('x_values', 'max'))
@property
def x_min(self):
return min(self.get_nested_extent('x_values', 'min'))
@property
def y_max(self):
return max(self.get_nested_extent('y_values', 'max'))
@property
def y_min(self):
return min(self.get_nested_extent('y_values', 'min'))
class HorizonGlyph(AreaGlyph):
num_folds = Int(default=3, help="""The count of times the data is overlapped.""")
series = Int(default=0, help="""The id of the series as the order it will appear,
starting from 0.""")
series_count = Int()
fold_height = Float(help="""The height of one fold.""")
bins = List(Float, help="""The binedges calculated from the number of folds,
and the maximum value of the entire source data.""")
graph_ratio = Float(help="""Scales heights of each series based on number of folds
and the number of total series being plotted.
""")
pos_color = Color("#006400", help="""The color used for positive values.""")
neg_color = Color("#6495ed", help="""The color used for negative values.""")
flip_neg = Bool(default=True, help="""When True, the negative values will be
plotted as their absolute value, then their individual axes is flipped. If False,
then the negative values will still be taken as their absolute value, but the base
of their shape will start from the same origin as the positive values.
""")
def __init__(self, bins=None, **kwargs):
# fill alpha depends on how many folds will be layered
kwargs['fill_alpha'] = 1.0/kwargs['num_folds']
if bins is not None:
kwargs['bins'] = bins
# each series is shifted up to a synthetic y-axis
kwargs['base'] = kwargs['series'] * max(bins) / kwargs['series_count']
kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['series_count'])
super(HorizonGlyph, self).__init__(**kwargs)
def build_source(self):
data = {}
# Build columns for the positive values
pos_y = self.y.copy()
pos_y[pos_y < 0] = 0
xs, ys = self._build_dims(self.x, pos_y)
# list of positive colors and alphas
colors = [self.pos_color] * len(ys)
alphas = [(bin_idx * self.fill_alpha) for bin_idx in
range(0, len(self.bins))]
# If we have negative values at all, add the values for those as well
if self.y.min() < 0:
neg_y = self.y.copy()
neg_y[neg_y > 0] = 0
neg_y = abs(neg_y)
neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)
xs += neg_xs
ys += neg_ys
colors += ([self.neg_color] * len(neg_ys))
alphas += alphas
# create clipped representation of each band
data['x_values'] = xs
data['y_values'] = ys
data['fill_color'] = colors
data['fill_alpha'] = colors
data['line_color'] = colors
return data
def _build_dims(self, x, y, flip=False):
""" Creates values needed to plot each fold of the horizon glyph.
Bins the data based on the binning passed into the glyph, then copies and clips
the values for each bin.
Args:
x (`pandas.Series`): array of x values
y (`pandas.Series`): array of y values
flip (bool): whether to flip values, used when handling negative values
Returns:
tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
arrays for the x values and list of arrays for the y values. The data
has been folded and transformed so the patches glyph presents the data
in a way that looks like an area chart.
"""
# assign bins to each y value
bin_idx = pd.cut(y, bins=self.bins, labels=False, include_lowest=True)
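        # pd.cut with labels=False returns, for each y value, the integer index
        # of the fold (bin) it falls into; include_lowest keeps values equal to
        # the lowest edge inside fold 0.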
xs, ys = [], []
for idx, bin in enumerate(self.bins[0:-1]):
# subtract off values associated with lower bins, to get into this bin
temp_vals = y.copy() - (idx * self.fold_height)
# clip the values between the fold range and zero
temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
temp_vals[bin_idx < idx] = 0
temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]
# if flipping, we must start the values from the top of each fold's range
if flip:
temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
base = self.base + (self.fold_height * self.graph_ratio)
else:
base = self.base
# shift values up based on index of series
temp_vals += self.base
val_idx = temp_vals > 0
if pd.Series.any(val_idx):
ys.append(temp_vals)
xs.append(x)
# transform clipped data so it always starts and ends at its base value
if len(ys) > 0:
xs, ys = map(list, zip(*[generate_patch_base(x, y, base=base) for
x, y in zip(xs, ys)]))
return xs, ys
def build_renderers(self):
# parse all series. We exclude the first attr as it's the x values
# added for the index
glyph = Patches(
xs='x_values', ys='y_values',
fill_alpha=self.fill_alpha, fill_color='fill_color',
line_color='line_color'
)
renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
yield renderer
class StepGlyph(LineGlyph):
"""Represents a group of data as a stepped line."""
def build_source(self):
x = self.x
y = self.y
if self.x is None:
x = self.y.index
elif self.y is None:
y = self.x.index
dtype = x.dtype if hasattr(x, 'dtype') else np.int
xs = np.empty(2*len(x)-1, dtype=dtype)
xs[::2] = x[:]
xs[1::2] = x[1:]
dtype = y.dtype if hasattr(y, 'dtype') else np.float64
ys = np.empty(2*len(y)-1, dtype=dtype)
ys[::2] = y[:]
ys[1::2] = y[:-1]
data = dict(x_values=xs, y_values=ys)
return data
class AggregateGlyph(NestedCompositeGlyph):
"""A base composite glyph for aggregating an array.
Implements default stacking and dodging behavior that other composite
glyphs can inherit.
"""
x_label = String()
x_label_value = Any()
stack_label = String()
stack_shift = Float(default=0.0)
dodge_label = String(help="""Where on the scale the glyph should be placed.""")
dodge_shift = Float(default=None)
agg = Instance(Stat, default=Sum())
span = Float(help="""The range of values represented by the aggregate.""")
def __init__(self, x_label=None, **kwargs):
label = kwargs.get('label')
if x_label is not None:
kwargs['x_label_value'] = x_label
if not isinstance(x_label, str):
x_label = str(x_label)
kwargs['x_label'] = x_label
elif label is not None:
kwargs['x_label'] = str(label)
super(AggregateGlyph, self).__init__(**kwargs)
def get_dodge_label(self, shift=0.0):
"""Generate the label defining an offset in relation to a position on a scale."""
if self.dodge_shift is None:
shift_str = ':' + str(0.5 + shift)
elif self.dodge_shift is not None:
shift_str = ':' + str(self.dodge_shift + shift)
else:
shift_str = ''
return str(label_from_index_dict(self.x_label)) + shift_str
def filter_glyphs(self, glyphs):
"""Return only the glyphs that are of the same class."""
return [glyph for glyph in glyphs if isinstance(glyph, self.__class__)]
@staticmethod
def groupby(glyphs, prop):
"""Returns a dict of `CompositeGlyph`s, grouped by unique values of prop.
For example, if all glyphs had a value of 'a' or 'b' for glyph.prop, the dict
would contain two keys, 'a' and 'b', where each value is a list of the glyphs
that had each of the values.
"""
grouped = defaultdict(list)
labels = [getattr(glyph, prop) for glyph in glyphs]
labels = [tuple(label.values()) if isinstance(label, dict) else label for label
in labels]
[grouped[label].append(glyph) for label, glyph in zip(labels, glyphs)]
labels = pd.Series(labels).drop_duplicates().values
return labels, grouped
def __stack__(self, glyphs):
"""Apply relative shifts to the composite glyphs for stacking."""
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'x_label')
for label in labels:
group = grouped[label]
# separate the negative and positive aggregates into separate groups
neg_group = [glyph for glyph in group if glyph.span < 0]
pos_group = [glyph for glyph in group if glyph.span >= 0]
# apply stacking to each group separately
for group in [neg_group, pos_group]:
shift = []
for i, glyph in enumerate(group):
# save off the top of each rect's height
shift.append(glyph.span)
if i > 0:
glyph.stack_shift = sum(shift[0:i])
glyph.refresh()
def __dodge__(self, glyphs):
"""Apply relative shifts to the composite glyphs for dodging."""
if self.dodge_label is not None:
filtered_glyphs = self.filter_glyphs(glyphs)
labels, grouped = self.groupby(filtered_glyphs, 'dodge_label')
# calculate transformations
step = np.linspace(0, 1.0, len(grouped.keys()) + 1, endpoint=False)
width = min(0.2, (1. / len(grouped.keys())) ** 1.1)
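            # bar width shrinks as the number of dodged groups grows and is
            # capped at 0.2 of the category width.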
# set bar attributes and re-aggregate
for i, label in enumerate(labels):
group = grouped[label]
for glyph in group:
glyph.dodge_shift = step[i + 1]
glyph.width = width
glyph.refresh()
class Interval(AggregateGlyph):
"""A rectangle representing aggregated values.
The interval is a rect glyph where two of the parallel sides represent a
summary of values. Each of the two sides is derived from a separate aggregation of
the values provided to the interval.
.. note::
A bar is a special case interval where one side is pinned and used to
communicate a value relative to it.
"""
width = Float(default=0.8)
start_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Min(), help="""
The stat used to derive the starting point of the composite glyph.""")
end_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Max(), help="""
The stat used to derive the end point of the composite glyph.""")
start = Float(default=0.0)
end = Float()
def __init__(self, label, values, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
super(Interval, self).__init__(**kwargs)
self.setup()
def get_start(self):
"""Get the value for the start of the glyph."""
if len(self.values.index) == 1:
self.start_agg = None
return self.values[0]
elif isinstance(self.start_agg, str):
self.start_agg = stats[self.start_agg]()
self.start_agg.set_data(self.values)
return self.start_agg.value
def get_end(self):
"""Get the value for the end of the glyph."""
if isinstance(self.end_agg, str):
self.end_agg = stats[self.end_agg]()
self.end_agg.set_data(self.values)
return self.end_agg.value
def get_span(self):
"""The total range between the start and end."""
return self.end - self.start
def build_source(self):
# ToDo: Handle rotation
self.start = self.get_start()
self.end = self.get_end()
self.span = self.get_span()
width = [self.width]
if self.dodge_shift is not None:
x = [self.get_dodge_label()]
else:
x = [self.x_label]
height = [self.span]
y = [self.stack_shift + (self.span / 2.0) + self.start]
color = [self.color]
fill_alpha = [self.fill_alpha]
line_color = [self.line_color]
line_alpha = [self.line_alpha]
label = [self.label]
return dict(x=x, y=y, width=width, height=height, color=color,
fill_alpha=fill_alpha, line_color=line_color,
line_alpha=line_alpha, label=label)
@property
def x_max(self):
"""The maximum extent of the glyph in x.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) + (self.width / 2.0)
@property
def x_min(self):
"""The maximum extent of the glyph in y.
.. note::
Dodging the glyph can affect the value.
"""
return (self.dodge_shift or self.x_label_value) - (self.width / 2.0)
@property
def y_max(self):
"""Maximum extent of all `Glyph`s.
How much we are stacking + the height of the interval + the base of the interval
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return max(self.bottom, self.top)
@property
def y_min(self):
"""The minimum extent of all `Glyph`s in y.
.. note::
the start and end of the glyph can swap between being associated with the
min and max when the glyph end represents a negative value.
"""
return min(self.bottom, self.top)
@property
def bottom(self):
"""The value associated with the start of the stacked glyph."""
return self.stack_shift + self.start
@property
def top(self):
"""The value associated with the end of the stacked glyph."""
return self.stack_shift + self.span + self.start
def build_renderers(self):
"""Yields a `GlyphRenderer` associated with a `Rect` glyph."""
glyph = Rect(x='x', y='y', width='width', height='height', fill_color='color',
fill_alpha='fill_alpha', line_color='line_color')
yield GlyphRenderer(glyph=glyph)
class BarGlyph(Interval):
"""Special case of Interval where the span represents a value.
A bar always begins from 0, or the value that is being compared to, and
extends to some positive or negative value.
"""
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
kwargs['start_agg'] = None
super(BarGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
class DotGlyph(Interval):
"""Special case of Interval where the span represents a value.
A bar always begins from 0, or the value that is being compared to, and
extends to some positive or negative value.
"""
marker = String(default='circle')
size = Float(default=8)
stem = Bool(False, help="""
    Whether to draw a stem from each dot to the axis.
""")
stem_line_width = Float(default=1)
stem_color = String(default='black')
def __init__(self, label, values, agg='sum', **kwargs):
kwargs['end_agg'] = agg
super(DotGlyph, self).__init__(label, values, **kwargs)
self.setup()
def get_start(self):
return 0.0
def get_glyph(self):
return marker_types[self.marker]
def build_renderers(self):
if self.stem:
yield GlyphRenderer(glyph=Segment(
x0='x', y0=0, x1='x', y1='height',
line_width=self.stem_line_width,
line_color=self.stem_color,
line_alpha='fill_alpha')
)
glyph_type = self.get_glyph()
glyph = glyph_type(x='x', y='height',
line_color=self.line_color,
fill_color=self.color,
size=self.size,
fill_alpha='fill_alpha',
line_alpha='line_alpha'
)
yield GlyphRenderer(glyph=glyph)
class QuartileGlyph(Interval):
"""An interval that has start and end aggregations of quartiles."""
def __init__(self, label, values, interval1, interval2, **kwargs):
kwargs['label'] = label
kwargs['values'] = values
kwargs['start_agg'] = Quantile(interval=interval1)
kwargs['end_agg'] = Quantile(interval=interval2)
super(QuartileGlyph, self).__init__(**kwargs)
self.setup()
class BoxGlyph(AggregateGlyph):
"""Summarizes the distribution with a collection of glyphs.
A box glyph produces one "box" for a given array of vales. The box
is made up of multiple other child composite glyphs (intervals,
scatter) and directly produces glyph renderers for the whiskers,
as well.
"""
q1 = Float(help="""Derived value for 25% of all values.""")
q2 = Float(help="""Derived value for 50% of all values.""")
q3 = Float(help="""Derived value for 75% of all values.""")
iqr = Float()
w0 = Float(help='Lower whisker')
w1 = Float(help='Upper whisker')
q2_glyph = Instance(QuartileGlyph)
q3_glyph = Instance(QuartileGlyph)
whisker_glyph = Instance(GlyphRenderer)
outliers = Either(Bool, Instance(PointGlyph))
marker = String(default='circle')
whisker_width = Float(default=0.3)
whisker_line_width = Float(default=2)
whisker_span_line_width = Float(default=2)
whisker_color = String(default='black')
outlier_fill_color = String(default='red')
outlier_line_color = String(default='red')
outlier_size = Float(default=5)
bar_color = String(default='DimGrey')
def __init__(self, label, values, outliers=True, **kwargs):
width = kwargs.pop('width', None)
bar_color = kwargs.pop('color', None) or kwargs.get('bar_color', None) or self.lookup('bar_color').class_default()
kwargs['outliers'] = kwargs.pop('outliers', None) or outliers
kwargs['label'] = label
kwargs['values'] = values
x_label = kwargs.get('x_label')
kwargs['q2_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.25, interval2=0.5, width=width,
color=bar_color)
kwargs['q3_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.5, interval2=0.75, width=width,
color=bar_color)
super(BoxGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yields all renderers that make up the BoxGlyph."""
self.calc_quartiles()
outlier_values = self.values[((self.values < self.w0) | (self.values > self.w1))]
self.whisker_glyph = GlyphRenderer(glyph=Segment(x0='x0s', y0='y0s', x1='x1s', y1='y1s',
line_width=self.whisker_line_width,
line_color=self.whisker_color))
if len(outlier_values) > 0 and self.outliers:
self.outliers = PointGlyph(label=self.label, y=outlier_values,
x=[self.get_dodge_label()] * len(outlier_values),
line_color=self.outlier_line_color,
fill_color=self.outlier_fill_color,
size=self.outlier_size, marker=self.marker)
for comp_glyph in self.composite_glyphs:
for renderer in comp_glyph.renderers:
yield renderer
yield self.whisker_glyph
def calc_quartiles(self):
"""Sets all derived stat properties of the BoxGlyph."""
self.q1 = self.q2_glyph.start
self.q2 = self.q2_glyph.end
self.q3 = self.q3_glyph.end
self.iqr = self.q3 - self.q1
mx = Max()
mx.set_data(self.values)
mn = Min()
mn.set_data(self.values)
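        # whiskers extend at most 1.5 * IQR beyond the quartile box, clipped to
        # the observed minimum and maximum of the data.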
self.w0 = max(self.q1 - (1.5 * self.iqr), mn.value)
self.w1 = min(self.q3 + (1.5 * self.iqr), mx.value)
def build_source(self):
"""Calculate stats and builds and returns source for whiskers."""
self.calc_quartiles()
x_label = self.get_dodge_label()
x_w0_label = self.get_dodge_label(shift=(self.whisker_width / 2.0))
x_w1_label = self.get_dodge_label(shift=-(self.whisker_width / 2.0))
# span0, whisker bar0, span1, whisker bar1
x0s = [x_label, x_w0_label, x_label, x_w0_label]
y0s = [self.w0, self.w0, self.q3, self.w1]
x1s = [x_label, x_w1_label, x_label, x_w1_label]
y1s = [self.q1, self.w0, self.w1, self.w1]
return dict(x0s=x0s, y0s=y0s, x1s=x1s, y1s=y1s)
def _set_sources(self):
"""Set the column data source on the whisker glyphs."""
self.whisker_glyph.data_source = self.source
def get_extent(self, func, prop_name):
return func([getattr(renderer, prop_name) for renderer in self.composite_glyphs])
@property
def composite_glyphs(self):
"""Returns list of composite glyphs, excluding the regular glyph renderers."""
comp_glyphs = [self.q2_glyph, self.q3_glyph]
if isinstance(self.outliers, PointGlyph):
comp_glyphs.append(self.outliers)
return comp_glyphs
@property
def x_max(self):
return self.get_extent(max, 'x_max') + self.right_buffer
@property
def x_min(self):
return self.get_extent(min, 'x_min') - self.left_buffer
@property
def y_max(self):
return max(self.w1, self.get_extent(max, 'y_max')) + self.top_buffer
@property
def y_min(self):
return min(self.w0, self.get_extent(min, 'y_min')) - self.bottom_buffer
class HistogramGlyph(AggregateGlyph):
"""Depicts the distribution of values using rectangles created by binning.
The histogram represents a distribution, so will likely include other
options for displaying it, such as KDE and cumulative density.
"""
# derived models
bins = Instance(BinnedStat, help="""A stat used to calculate the bins. The bins stat
includes attributes about each composite bin.""")
bars = List(Instance(BarGlyph), help="""The histogram is comprised of many
BarGlyphs that are derived from the values.""")
density = Bool(False, help="""
Whether to normalize the histogram.
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check :class:`~bokeh.charts.stats.Histogram` documentation.
(default: False)
""")
def __init__(self, values, label=None, color=None, bins=None, **kwargs):
if label is not None:
kwargs['label'] = label
kwargs['values'] = values
if color is not None:
kwargs['color'] = color
# remove width, since this is handled automatically
kwargs.pop('width', None)
# keep original bins setting private since it just needs to be
# delegated to the Histogram stat
self._bins = bins
super(HistogramGlyph, self).__init__(**kwargs)
self.setup()
def _set_sources(self):
# No need to set sources, since composite glyphs handle this
pass
def build_source(self):
# No need to build source, since composite glyphs handle this
return None
def build_renderers(self):
"""Yield a bar glyph for each bin."""
# TODO(fpliger): We should expose the bin stat class so we could let
# users specify other bins other the Histogram Stat
self.bins = Histogram(values=self.values, bins=self._bins,
density=self.density)
bars = []
for bin in self.bins.bins:
bars.append(BarGlyph(label=bin.label[0], x_label=bin.center,
values=bin.values, color=self.color,
fill_alpha=self.fill_alpha,
agg=bin.stat, width=bin.width))
# provide access to bars as children for bounds properties
self.bars = self.children = bars
for comp_glyph in self.bars:
for renderer in comp_glyph.renderers:
yield renderer
@property
def y_min(self):
return 0.0
class BinGlyph(XyGlyph):
"""Represents a group of data that was aggregated and is represented by a glyph.
"""
bins = Instance(Bins)
column = String()
stat = String()
glyph_name = String()
width = Float()
height = Float()
def __init__(self, x, y, values, column=None, stat='count', glyph='rect', width=1,
height=1, **kwargs):
df = pd.DataFrame(dict(x_vals=x, y_vals=y, values_vals=values))
df.drop_duplicates(inplace=True)
kwargs['x'] = df.x_vals
kwargs['y'] = df.y_vals
kwargs['values'] = df.values_vals
kwargs['column'] = column
kwargs['stat'] = stat
kwargs['glyph_name'] = glyph
kwargs['height'] = height
kwargs['width'] = width
if 'glyphs' not in kwargs:
kwargs['glyphs'] = {'rect': Rect}
super(XyGlyph, self).__init__(**kwargs)
self.setup()
def build_source(self):
return {'x': self.x, 'y': self.y, 'values': self.values}
def build_renderers(self):
glyph_class = self.glyphs[self.glyph_name]
glyph = glyph_class(x='x', y='y', height=self.height, width=self.width,
fill_color=self.fill_color, line_color=self.line_color,
dilate=True)
yield GlyphRenderer(glyph=glyph)
@property
def x_max(self):
return self.get_data_range('x')[1] + self.width / 2.0
@property
def x_min(self):
return self.get_data_range('x')[0] - self.width / 2.0
@property
def y_max(self):
return self.get_data_range('y')[1] + self.height / 2.0
@property
def y_min(self):
return self.get_data_range('y')[0] - self.height / 2.0
def get_data_range(self, col):
data = self.source.data[col]
if ChartDataSource.is_number(data):
return min(data), max(data)
else:
return 1, len(data.drop_duplicates())
class ArcGlyph(LineGlyph):
"""Represents a group of data as an arc."""
start_angle = Angle()
end_angle = Angle()
def __init__(self, **kwargs):
super(self.__class__, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yield a `GlyphRenderer` for the group of data."""
glyph = Arc(x='x', y='y', radius=1,
start_angle='_end_angle',
end_angle='_start_angle',
line_color='line_color')
yield GlyphRenderer(glyph=glyph)
| bsd-3-clause |
bestwpw/BDA_py_demos | demos_ch10/demo10_2.py | 19 | 1606 | """Bayesian data analysis
Chapter 10, demo 2
Importance sampling example
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
plt.rc('patch', facecolor='#bfe2ff')
# fake interesting distribution
x = np.linspace(-3, 3, 200)
r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 ,
1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ])
# Estimate the density (named q, to emphasize that it does not need to be
# normalized). Parameter bw_method=0.48 is used to mimic the outcome of the
# kernelp function in Matlab.
q_func = stats.gaussian_kde(r, bw_method=0.48)
q = q_func.evaluate(x)
# importance sampling example
g = stats.norm.pdf(x)
w = q/g
r = np.random.randn(100)
r = r[np.abs(r) < 3] # remove samples out of the grid
wr = q_func.evaluate(r)/stats.norm.pdf(r)
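# Illustrative addition (not part of the original demo): a self-normalized
# importance sampling estimate of E[theta|y] from the weighted draws above.
theta_estimate = np.sum(wr * r) / np.sum(wr)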
# plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10,8))
axes[0].plot(x, q, label=r'$q(\theta|y)$')
axes[0].plot(x, g, label=r'$g(\theta)$')
axes[0].set_yticks(())
axes[0].set_title('target and proposal distributions')
axes[0].legend()
axes[1].plot(x, w, label=r'$q(\theta|y)/g(\theta)$')
axes[1].set_title('samples and importance weights')
axes[1].vlines(r, 0, wr, color='#377eb8', alpha=0.4)
axes[1].set_ylim((0,axes[1].get_ylim()[1]))
axes[1].legend()
plt.show()
| gpl-3.0 |
thp44/delphin_6_automation | data_process/wp6_run/sim_reports/utils.py | 1 | 7429 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [10, 5]
import datetime
from scipy.optimize import curve_fit
# RiBuild Modules
from delphin_6_automation.database_interactions.db_templates import delphin_entry
from delphin_6_automation.database_interactions.db_templates import result_processed_entry
from delphin_6_automation.database_interactions.db_templates import sample_entry
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.database_interactions import simulation_interactions
from delphin_6_automation.backend import result_extraction
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
figure_size = (20, 10)
def get_time(projects):
time = []
for p in projects:
time.append(p.simulation_time)
return np.array(time) / 60
def get_simulation_time():
filtered_entries = delphin_entry.Delphin.objects(simulated__exists=True).only('simulation_time')
gt_250 = filtered_entries.filter(simulation_time__gt=15000)
sim_time = get_time(filtered_entries)
return {
'min': np.min(sim_time),
'q25': np.quantile(sim_time, 0.25),
'mean': np.mean(sim_time),
'median': np.quantile(sim_time, 0.50),
'q75': np.quantile(sim_time, 0.75),
'q95': np.quantile(sim_time, 0.95),
'max': np.max(sim_time),
'gt_250': gt_250.count()
}
def get_simulation_time_plot():
filtered_entries = delphin_entry.Delphin.objects(simulated__exists=True).only('simulation_time')
sim_time = get_time(filtered_entries)
hist, edges = np.histogram(sim_time, density=True, bins=200)
dx = edges[1] - edges[0]
cdf = np.cumsum(hist) * dx
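    # cumulative sum of the density histogram times the bin width gives an
    # empirical CDF of the simulation times.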
plt.figure(figsize=figure_size)
plt.plot(edges[1:], cdf, color='darkslateblue')
plt.axvline(x=np.mean(sim_time), linestyle=':', color='k', label='Mean')
plt.axvline(x=np.median(sim_time), linestyle='--', color='k', label='Median')
plt.title('Simulation Time')
plt.xlabel('Simulation Time in Minutes')
plt.ylabel('Ratio of Simulations')
plt.legend()
plt.xlim(-5, 260)
plt.show()
def get_actual_average_simulation_time():
start_time = delphin_entry.Delphin.objects(simulated__exists=True).only('simulated').order_by('simulated').first()['simulated']
delta_time = (datetime.datetime.now() - start_time).total_seconds()/60
return get_simulated_projects_count()/delta_time
def get_convergence_mould():
strategy = sample_entry.Strategy.objects().only('standard_error').first()
mould = []
for design in strategy.standard_error:
mould.append(strategy.standard_error[design]['mould'])
mould = np.array(mould)
mould_avg = np.nanmean(mould, axis=0)
mould_min = np.nanmin(mould, axis=0)
mould_max = np.nanmax(mould, axis=0)
mould_q25 = np.nanquantile(mould, 0.25, axis=0)
mould_q50 = np.nanquantile(mould, 0.50, axis=0)
mould_q75 = np.nanquantile(mould, 0.75, axis=0)
x = np.arange(0, len(mould_avg))
plt.figure(figsize=figure_size)
plt.plot(x, mould_avg, color='k', label='Average Absolute Error')
plt.plot(x, mould_q25, color='#721616', label='Q25 Absolute Error')
plt.plot(x, mould_q50, color='#d73030', label='Median Absolute Error')
plt.plot(x, mould_q75, color='#db4545', label='Q75 Absolute Error')
plt.fill_between(x, mould_min, mould_max, alpha=0.7, color='firebrick', label='Max-Min Absolute Error')
plt.axhline(y=0.1, linestyle=':', color='k', label='Convergence Criteria - 10%')
plt.ylabel('Absolute Error')
plt.xlabel('Sample Iteration')
plt.title('Convergence - Mould')
plt.legend()
plt.show()
return {
'min': mould_min,
'q25': mould_q25,
'mean': mould_avg,
'median': mould_q50,
'q75': mould_q75,
'max': mould_max,
}
def get_convergence_heatloss(mode=None):
strategy = sample_entry.Strategy.objects().only('standard_error').first()
hl = []
for design in strategy.standard_error:
hl.append(strategy.standard_error[design]['heat_loss'])
hl = np.array(hl)
hl_avg = np.nanmean(hl, axis=0)
hl_min = np.nanmin(hl, axis=0)
hl_max = np.nanmax(hl, axis=0)
hl_q25 = np.nanquantile(hl, 0.25, axis=0)
hl_q50 = np.nanquantile(hl, 0.50, axis=0)
hl_q75 = np.nanquantile(hl, 0.75, axis=0)
x = np.arange(0, len(hl_avg))
plt.figure(figsize=figure_size)
plt.plot(x, hl_avg, color='k', label='Average Absolute Error')
plt.plot(x, hl_q25, color='#0000b3', label='Q25 Absolute Error')
plt.plot(x, hl_q50, color='#3333ff', label='Median Absolute Error')
plt.plot(x, hl_q75, color='#4d4dff', label='Q75 Absolute Error')
plt.fill_between(x, hl_min, hl_max, alpha=0.7, color='darkslateblue', label='Max-Min Absolute Error')
plt.axhline(y=0.1, linestyle=':', color='k', label='Convergence Criteria - 10%')
plt.ylabel('Absolute Error')
plt.xlabel('Sample Iteration')
plt.title('Convergence - Heat Loss')
if not mode:
plt.ylim(-0.5, 1.2)
plt.legend()
plt.show()
return {
'min': hl_min,
'q25': hl_q25,
'mean': hl_avg,
'median': hl_q50,
'q75': hl_q75,
'max': hl_max,
}
def get_mould_cdf():
results = result_processed_entry.ProcessedResult.objects.only('thresholds.mould')
x, y = compute_cdf(results, 'mould')
plt.figure(figsize=figure_size)
plt.plot(x, y)
plt.title('Cumulative Distribution Function\nMould')
plt.xlabel('Mould Index')
plt.ylabel('Ratio')
#plt.show()
def get_heatloss_cdf():
results = result_processed_entry.ProcessedResult.objects.only('thresholds.heat_loss')
x, y = compute_cdf(results, 'heat_loss')
plt.figure(figsize=figure_size)
plt.plot(x, y)
plt.title('Cumulative Distribution Function\nHeat Loss')
plt.xlabel('Wh')
plt.ylabel('Ratio')
plt.xlim(0, 1.3*10**8)
#plt.show()
def compute_cdf(results: list, quantity: str):
quantities = [doc.thresholds[quantity] for doc in results]
hist, edges = np.histogram(quantities, density=True, bins=50)
dx = edges[1] - edges[0]
cdf = np.cumsum(hist) * dx
return edges[1:], cdf
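def _cdf_construction_example():
    # Illustrative sketch on synthetic data (no ProcessedResult documents needed):
    # with density=True, cumsum(hist) * bin_width gives a discrete CDF ending
    # near 1, which is exactly the (edges[1:], cdf) pairing compute_cdf returns.
    values = np.random.normal(loc=2.0, scale=0.5, size=1000)
    hist, edges = np.histogram(values, density=True, bins=50)
    cdf = np.cumsum(hist) * (edges[1] - edges[0])
    return edges[1:], cdf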
def get_simulated_projects_count():
return delphin_entry.Delphin.objects(simulated__exists=True).count()
def estimate_future_convergence(data, dmg_model, max_):
def curve_func(x, a, b):
return a * x + b
plt.figure(figsize=figure_size)
plt.title(f'Convergence Estimation\n{dmg_model.upper()}')
colors = {'mould': {'mean': 'k', 'q25': '#721616', 'median': '#d73030', 'q75': '#db4545'},
'heat loss': {'mean': 'k', 'q25': '#0000b3', 'median': '#3333ff', 'q75': '#4d4dff'}}
for key in data.keys():
if key in ['min', 'max']:
continue
else:
popt, pcov = curve_fit(curve_func, np.arange(0, len(data[key])), data[key])
xdata = np.linspace(0, max_)
plt.plot(xdata, curve_func(xdata, *popt), colors[dmg_model][key], label=f'{key.upper()}')
plt.legend() | mit |
TengdaHan/Convolutional_Sketch_Inversion | src/utils/simple_utils.py | 1 | 1499 | import matplotlib.pyplot as plt
import numpy as np
def plot_batch_train(model, img_size, batch_size, sketch, color, epoch, idx, tag, nb_img=5):
img_sketch = np.array(sketch[0:nb_img])
img_color = np.array(color[0:nb_img])
img_gen = model.predict(sketch, batch_size=batch_size)[0][0:nb_img]
for i in range(nb_img):
plt.subplot(nb_img, 3, i * 3 + 1)
plt.imshow(img_sketch[i].reshape((img_size,img_size)), cmap='Greys_r')
plt.axis('off')
plt.subplot(nb_img, 3, i * 3 + 2)
plt.imshow(img_color[i])
plt.axis('off')
plt.subplot(nb_img, 3, i * 3 + 3)
plt.imshow(img_gen[i])
plt.axis('off')
plt.savefig("figures/%s_fig_epoch%s_idx%s.png" % (tag, epoch, idx))
plt.clf()
plt.close()
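class _StubModel:
    # Stand-in used only to document the interface plot_batch_train and
    # plot_batch_eval rely on (an assumption, not the project's real generator):
    # predict() must return a list whose first element is the generated image batch.
    def __init__(self, img_size):
        self.img_size = img_size
    def predict(self, sketch, batch_size=None):
        return [np.random.rand(len(sketch), self.img_size, self.img_size, 3)]
# Example call with synthetic data (left commented out because it writes to ./figures):
#   model = _StubModel(64)
#   sketches = np.random.rand(5, 64, 64, 1)
#   colors = np.random.rand(5, 64, 64, 3)
#   plot_batch_train(model, 64, 5, sketches, colors, epoch=0, idx=0, tag='demo')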
def plot_batch_eval(model, img_size, batch_size, sketch, tag, nb_img=16):
img_sketch = np.array(sketch[0:nb_img])
img_gen = model.predict(sketch, batch_size=batch_size)[0][0:nb_img]
for i in range(nb_img):
plt.subplot(4, 4, i + 1)
if i % 2 == 0:
plt.imshow(img_sketch[i].squeeze(), cmap='Greys_r')
else:
plt.imshow(img_gen[i-1])
plt.axis('off')
# plt.subplot(nb_img, 2, i * 2 + 1)
# plt.imshow(img_sketch[i].reshape((img_size,img_size)), cmap='Greys_r')
#
# plt.subplot(nb_img, 2, i * 2 + 2)
# plt.imshow(img_gen[i])
# plt.axis('off')
plt.savefig("figures/%s.png" % tag)
plt.clf()
plt.close()
| mit |
wdurhamh/statsmodels | examples/python/tsa_filters.py | 34 | 4559 |
## Time Series Filters
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
dta = sm.datasets.macrodata.load_pandas().data
index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
print(index)
dta.index = index
del dta['year']
del dta['quarter']
print(sm.datasets.macrodata.NOTE)
print(dta.head(10))
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
dta.realgdp.plot(ax=ax);
legend = ax.legend(loc = 'upper left');
legend.prop.set_size(20);
#### Hodrick-Prescott Filter
# The Hodrick-Prescott filter separates a time-series $y_t$ into a trend $\tau_t$ and a cyclical component $\zeta_t$
#
# $$y_t = \tau_t + \zeta_t$$
#
# The components are determined by minimizing the following quadratic loss function
#
# $$\min_{\\{ \tau_{t}\\} }\sum_{t=1}^{T}\zeta_{t}^{2}+\lambda\sum_{t=1}^{T}\left[\left(\tau_{t}-\tau_{t-1}\right)-\left(\tau_{t-1}-\tau_{t-2}\right)\right]^{2}$$
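# statsmodels' hpfilter defaults to lamb=1600, the conventional smoothing value for
# quarterly data such as this series; pass lamb explicitly for other frequencies.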
gdp_cycle, gdp_trend = sm.tsa.filters.hpfilter(dta.realgdp)
gdp_decomp = dta[['realgdp']]
gdp_decomp["cycle"] = gdp_cycle
gdp_decomp["trend"] = gdp_trend
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
gdp_decomp[["realgdp", "trend"]]["2000-03-31":].plot(ax=ax, fontsize=16);
legend = ax.get_legend()
legend.prop.set_size(20);
#### Baxter-King approximate band-pass filter: Inflation and Unemployment
##### Explore the hypothesis that inflation and unemployment are counter-cyclical.
# The Baxter-King filter is intended to explicitly deal with the periodicity of the business cycle. By applying their band-pass filter to a series, they produce a new series that does not contain fluctuations at frequencies higher or lower than those of the business cycle. Specifically, the BK filter takes the form of a symmetric moving average
#
# $$y_{t}^{*}=\sum_{k=-K}^{k=K}a_ky_{t-k}$$
#
# where $a_{-k}=a_k$ and $\sum_{k=-k}^{K}a_k=0$ to eliminate any trend in the series and render it stationary if the series is I(1) or I(2).
#
# For completeness, the filter weights are determined as follows
#
# $$a_{j} = B_{j}+\theta\text{ for }j=0,\pm1,\pm2,\dots,\pm K$$
#
# $$B_{0} = \frac{\left(\omega_{2}-\omega_{1}\right)}{\pi}$$
# $$B_{j} = \frac{1}{\pi j}\left(\sin\left(\omega_{2}j\right)-\sin\left(\omega_{1}j\right)\right)\text{ for }j=\pm1,\pm2,\dots,\pm K$$
#
# where $\theta$ is a normalizing constant such that the weights sum to zero.
#
# $$\theta=\frac{-\sum_{j=-K}^{K}B_{j}}{2K+1}$$
#
# $$\omega_{1}=\frac{2\pi}{P_{H}}$$
#
# $$\omega_{2}=\frac{2\pi}{P_{L}}$$
#
# $P_L$ and $P_H$ are the periodicity of the low and high cut-off frequencies. Following Burns and Mitchell's work on US business cycles which suggests cycles last from 1.5 to 8 years, we use $P_L=6$ and $P_H=32$ by default.
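# bkfilter's defaults match these choices: low=6, high=32 (periods in quarters) and
# K=12 lead/lags, so 12 observations are dropped at each end of the filtered series.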
bk_cycles = sm.tsa.filters.bkfilter(dta[["infl","unemp"]])
# * We lose K observations on both ends. It is suggested to use K=12 for quarterly data.
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
bk_cycles.plot(ax=ax, style=['r--', 'b-']);
#### Christiano-Fitzgerald approximate band-pass filter: Inflation and Unemployment
# The Christiano-Fitzgerald filter is a generalization of BK and can thus also be seen as a weighted moving average. However, the CF filter is asymmetric about $t$ as well as using the entire series. The implementation of their filter involves the
# calculations of the weights in
#
# $$y_{t}^{*}=B_{0}y_{t}+B_{1}y_{t+1}+\dots+B_{T-1-t}y_{T-1}+\tilde B_{T-t}y_{T}+B_{1}y_{t-1}+\dots+B_{t-2}y_{2}+\tilde B_{t-1}y_{1}$$
#
# for $t=3,4,...,T-2$, where
#
# $$B_{j} = \frac{\sin(jb)-\sin(ja)}{\pi j},j\geq1$$
#
# $$B_{0} = \frac{b-a}{\pi},a=\frac{2\pi}{P_{u}},b=\frac{2\pi}{P_{L}}$$
#
# $\tilde B_{T-t}$ and $\tilde B_{t-1}$ are linear functions of the $B_{j}$'s, and the values for $t=1,2,T-1,$ and $T$ are also calculated in much the same way. $P_{U}$ and $P_{L}$ are as described above with the same interpretation.
# The CF filter is appropriate for series that may follow a random walk.
print(sm.tsa.stattools.adfuller(dta['unemp'])[:3])
print(sm.tsa.stattools.adfuller(dta['infl'])[:3])
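# cffilter defaults to low=6, high=32 and drift=True (a drift term is removed before
# filtering), which suits these quarterly, near-I(1) series.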
cf_cycles, cf_trend = sm.tsa.filters.cffilter(dta[["infl","unemp"]])
print(cf_cycles.head(10))
fig = plt.figure(figsize=(14,10))
ax = fig.add_subplot(111)
cf_cycles.plot(ax=ax, style=['r--','b-']);
# Filtering assumes *a priori* that business cycles exist. Due to this assumption, many macroeconomic models seek to create models that match the shape of impulse response functions rather than replicating properties of filtered series. See VAR notebook.
| bsd-3-clause |
great-expectations/great_expectations | great_expectations/cli/datasource.py | 1 | 28903 | import enum
import logging
import os
import sys
from typing import Optional, Union
import click
from great_expectations import DataContext
from great_expectations.cli import toolkit
from great_expectations.cli.pretty_printing import cli_message, cli_message_dict
from great_expectations.cli.util import verify_library_dependent_modules
from great_expectations.data_context.templates import YAMLToString
from great_expectations.datasource.types import DatasourceTypes
from great_expectations.render.renderer.datasource_new_notebook_renderer import (
DatasourceNewNotebookRenderer,
)
logger = logging.getLogger(__name__)
try:
import sqlalchemy
except ImportError:
logger.debug(
"Unable to load SqlAlchemy context; install optional sqlalchemy dependency for support"
)
sqlalchemy = None
yaml = YAMLToString()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
class SupportedDatabaseBackends(enum.Enum):
MYSQL = "MySQL"
POSTGRES = "Postgres"
REDSHIFT = "Redshift"
SNOWFLAKE = "Snowflake"
BIGQUERY = "BigQuery"
OTHER = "other - Do you have a working SQLAlchemy connection string?"
# TODO MSSQL
@click.group()
@click.pass_context
def datasource(ctx):
"""Datasource operations"""
directory: str = toolkit.parse_cli_config_file_location(
config_file_location=ctx.obj.config_file_location
).get("directory")
context: DataContext = toolkit.load_data_context_with_error_handling(
directory=directory,
from_cli_upgrade_command=False,
)
# TODO consider moving this all the way up in to the CLIState constructor
ctx.obj.data_context = context
usage_stats_prefix = f"cli.datasource.{ctx.invoked_subcommand}"
toolkit.send_usage_message(
data_context=context,
event=f"{usage_stats_prefix}.begin",
success=True,
)
ctx.obj.usage_event_end = f"{usage_stats_prefix}.end"
@datasource.command(name="new")
@click.pass_context
@click.option("--name", default=None, help="Datasource name.")
@click.option(
"--jupyter/--no-jupyter",
is_flag=True,
help="By default launch jupyter notebooks unless you specify the --no-jupyter flag",
default=True,
)
def datasource_new(ctx, name, jupyter):
"""Add a new Datasource to the data context."""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
_datasource_new_flow(
context,
usage_event_end=usage_event_end,
datasource_name=name,
jupyter=jupyter,
)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return
@datasource.command(name="delete")
@click.argument("datasource")
@click.pass_context
def delete_datasource(ctx, datasource):
"""Delete the datasource specified as an argument"""
context: DataContext = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
if not ctx.obj.assume_yes:
toolkit.confirm_proceed_or_exit(
            confirm_prompt=f"""\nAre you sure you want to delete the Datasource "{datasource}" (this action is irreversible)?""",
continuation_message=f"Datasource `{datasource}` was not deleted.",
exit_on_no=True,
data_context=context,
usage_stats_event=usage_event_end,
)
try:
context.delete_datasource(datasource)
except ValueError:
cli_message(f"<red>Datasource {datasource} could not be found.</red>")
toolkit.send_usage_message(context, event=usage_event_end, success=False)
sys.exit(1)
try:
context.get_datasource(datasource)
except ValueError:
cli_message("<green>{}</green>".format("Datasource deleted successfully."))
toolkit.send_usage_message(context, event=usage_event_end, success=True)
sys.exit(0)
@datasource.command(name="list")
@click.pass_context
def datasource_list(ctx):
"""List known Datasources."""
context = ctx.obj.data_context
usage_event_end: str = ctx.obj.usage_event_end
try:
datasources = context.list_datasources()
cli_message(_build_datasource_intro_string(datasources))
for datasource in datasources:
cli_message("")
cli_message_dict(
{
"name": datasource["name"],
"class_name": datasource["class_name"],
}
)
toolkit.send_usage_message(
data_context=context, event=usage_event_end, success=True
)
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
context=context,
usage_event=usage_event_end,
message=f"<red>{e}</red>",
)
return
def _build_datasource_intro_string(datasources):
datasource_count = len(datasources)
if datasource_count == 0:
return "No Datasources found"
elif datasource_count == 1:
return "1 Datasource found:"
return f"{datasource_count} Datasources found:"
def _datasource_new_flow(
context: DataContext,
usage_event_end: str,
datasource_name: Optional[str] = None,
jupyter: bool = True,
) -> None:
files_or_sql_selection = click.prompt(
"""
What data would you like Great Expectations to connect to?
1. Files on a filesystem (for processing with Pandas or Spark)
2. Relational database (SQL)
""",
type=click.Choice(["1", "2"]),
show_choices=False,
)
if files_or_sql_selection == "1":
selected_files_backend = _prompt_for_execution_engine()
helper = _get_files_helper(
selected_files_backend,
context_root_dir=context.root_directory,
datasource_name=datasource_name,
)
elif files_or_sql_selection == "2":
if not _verify_sqlalchemy_dependent_modules():
return None
selected_database = _prompt_user_for_database_backend()
helper = _get_sql_yaml_helper_class(selected_database, datasource_name)
helper.send_backend_choice_usage_message(context)
if not helper.verify_libraries_installed():
return None
helper.prompt()
notebook_path = helper.create_notebook(context)
if jupyter is False:
cli_message(
f"To continue editing this Datasource, run <green>jupyter notebook {notebook_path}</green>"
)
toolkit.send_usage_message(context, event=usage_event_end, success=True)
return None
if notebook_path:
cli_message(
"""<green>Because you requested to create a new Datasource, we'll open a notebook for you now to complete it!</green>\n\n"""
)
toolkit.send_usage_message(context, event=usage_event_end, success=True)
toolkit.launch_jupyter_notebook(notebook_path)
class BaseDatasourceNewYamlHelper:
"""
This base class defines the interface for helpers used in the datasource new
flow.
"""
def __init__(
self,
datasource_type: DatasourceTypes,
usage_stats_payload: dict,
datasource_name: Optional[str] = None,
):
self.datasource_type: DatasourceTypes = datasource_type
self.datasource_name: Optional[str] = datasource_name
self.usage_stats_payload: dict = usage_stats_payload
def verify_libraries_installed(self) -> bool:
"""Used in the interactive CLI to help users install dependencies."""
raise NotImplementedError
def create_notebook(self, context: DataContext) -> str:
"""Create a datasource_new notebook and save it to disk."""
renderer = self.get_notebook_renderer(context)
notebook_path = os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"datasource_new.ipynb",
)
renderer.render_to_disk(notebook_path)
return notebook_path
def get_notebook_renderer(self, context) -> DatasourceNewNotebookRenderer:
"""Get a renderer specifically constructed for the datasource type."""
raise NotImplementedError
def send_backend_choice_usage_message(self, context: DataContext) -> None:
toolkit.send_usage_message(
data_context=context,
event="cli.new_ds_choice",
event_payload={
"type": self.datasource_type.value,
**self.usage_stats_payload,
},
success=True,
)
def prompt(self) -> None:
"""Optional prompt if more information is needed before making a notebook."""
pass
def yaml_snippet(self) -> str:
"""Override to create the yaml for the notebook."""
raise NotImplementedError
class FilesYamlHelper(BaseDatasourceNewYamlHelper):
"""The base class for pandas/spark helpers used in the datasource new flow."""
def __init__(
self,
datasource_type: DatasourceTypes,
usage_stats_payload: dict,
class_name: str,
context_root_dir: str,
datasource_name: Optional[str] = None,
):
super().__init__(datasource_type, usage_stats_payload, datasource_name)
self.class_name: str = class_name
self.base_path: str = ""
self.context_root_dir: str = context_root_dir
def get_notebook_renderer(self, context) -> DatasourceNewNotebookRenderer:
return DatasourceNewNotebookRenderer(
context,
datasource_type=self.datasource_type,
datasource_yaml=self.yaml_snippet(),
datasource_name=self.datasource_name,
)
def yaml_snippet(self) -> str:
"""
Note the InferredAssetFilesystemDataConnector was selected to get users
to data assets with minimal configuration. Other DataConnectors are
available.
"""
return f'''f"""
name: {{datasource_name}}
class_name: Datasource
execution_engine:
class_name: {self.class_name}
data_connectors:
default_inferred_data_connector_name:
class_name: InferredAssetFilesystemDataConnector
base_directory: {self.base_path}
default_regex:
group_names:
- data_asset_name
pattern: (.*)
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
"""'''
def prompt(self) -> None:
file_url_or_path: str = click.prompt(PROMPT_FILES_BASE_PATH, type=click.Path())
if not toolkit.is_cloud_file_url(file_url_or_path):
file_url_or_path = toolkit.get_relative_path_from_config_file_to_base_path(
self.context_root_dir, file_url_or_path
)
self.base_path = file_url_or_path
class PandasYamlHelper(FilesYamlHelper):
def __init__(
self,
context_root_dir: str,
datasource_name: Optional[str] = None,
):
super().__init__(
datasource_type=DatasourceTypes.PANDAS,
usage_stats_payload={
"type": DatasourceTypes.PANDAS.value,
"api_version": "v3",
},
context_root_dir=context_root_dir,
class_name="PandasExecutionEngine",
datasource_name=datasource_name,
)
def verify_libraries_installed(self) -> bool:
return True
class SparkYamlHelper(FilesYamlHelper):
def __init__(
self,
context_root_dir: str,
datasource_name: Optional[str] = None,
):
super().__init__(
datasource_type=DatasourceTypes.SPARK,
usage_stats_payload={
"type": DatasourceTypes.SPARK.value,
"api_version": "v3",
},
context_root_dir=context_root_dir,
class_name="SparkDFExecutionEngine",
datasource_name=datasource_name,
)
def verify_libraries_installed(self) -> bool:
return verify_library_dependent_modules(
python_import_name="pyspark", pip_library_name="pyspark"
)
class SQLCredentialYamlHelper(BaseDatasourceNewYamlHelper):
"""The base class for SQL helpers used in the datasource new flow."""
def __init__(
self,
usage_stats_payload: dict,
datasource_name: Optional[str] = None,
driver: str = "",
port: Union[int, str] = "YOUR_PORT",
host: str = "YOUR_HOST",
username: str = "YOUR_USERNAME",
password: str = "YOUR_PASSWORD",
database: str = "YOUR_DATABASE",
):
super().__init__(
datasource_type=DatasourceTypes.SQL,
usage_stats_payload=usage_stats_payload,
datasource_name=datasource_name,
)
self.driver = driver
self.host = host
self.port = str(port)
self.username = username
self.password = password
self.database = database
def credentials_snippet(self) -> str:
return f'''\
host = "{self.host}"
port = "{self.port}"
username = "{self.username}"
password = "{self.password}"
database = "{self.database}"'''
def yaml_snippet(self) -> str:
yaml_str = '''f"""
name: {datasource_name}
class_name: Datasource
execution_engine:
class_name: SqlAlchemyExecutionEngine'''
yaml_str += self._yaml_innards()
if self.driver:
yaml_str += f"""
drivername: {self.driver}"""
yaml_str += '''
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetSqlDataConnector
name: whole_table"""'''
return yaml_str
def _yaml_innards(self) -> str:
"""Override if needed."""
return """
credentials:
host: {host}
port: '{port}'
username: {username}
password: {password}
database: {database}"""
def get_notebook_renderer(self, context) -> DatasourceNewNotebookRenderer:
return DatasourceNewNotebookRenderer(
context,
datasource_type=self.datasource_type,
datasource_yaml=self.yaml_snippet(),
datasource_name=self.datasource_name,
sql_credentials_snippet=self.credentials_snippet(),
)
class MySQLCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
# We are insisting on pymysql driver when adding a MySQL datasource
# through the CLI to avoid over-complication of this flow. If user wants
# to use another driver, they must use a sqlalchemy connection string.
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": DatasourceTypes.SQL.value,
"db": SupportedDatabaseBackends.MYSQL.value,
"api_version": "v3",
},
driver="mysql+pymysql",
port=3306,
)
def verify_libraries_installed(self) -> bool:
return verify_library_dependent_modules(
python_import_name="pymysql",
pip_library_name="pymysql",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
class PostgresCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": "sqlalchemy",
"db": SupportedDatabaseBackends.POSTGRES.value,
"api_version": "v3",
},
driver="postgresql",
port=5432,
database="postgres",
)
def verify_libraries_installed(self) -> bool:
psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
# noinspection SpellCheckingInspection
postgresql_psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="sqlalchemy.dialects.postgresql.psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
return psycopg2_success and postgresql_psycopg2_success
class RedshiftCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
# We are insisting on psycopg2 driver when adding a Redshift datasource
# through the CLI to avoid over-complication of this flow. If user wants
# to use another driver, they must use a sqlalchemy connection string.
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": "sqlalchemy",
"db": SupportedDatabaseBackends.REDSHIFT.value,
"api_version": "v3",
},
driver="postgresql+psycopg2",
port=5439,
)
def verify_libraries_installed(self) -> bool:
# noinspection SpellCheckingInspection
psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
# noinspection SpellCheckingInspection
postgresql_psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="sqlalchemy.dialects.postgresql.psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
postgresql_success: bool = psycopg2_success and postgresql_psycopg2_success
redshift_success: bool = verify_library_dependent_modules(
python_import_name="sqlalchemy_redshift.dialect",
pip_library_name="sqlalchemy-redshift",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
return redshift_success or postgresql_success
def _yaml_innards(self) -> str:
return (
super()._yaml_innards()
+ """
query:
sslmode: prefer"""
)
class SnowflakeAuthMethod(enum.IntEnum):
USER_AND_PASSWORD = 0
SSO = 1
KEY_PAIR = 2
class SnowflakeCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": "sqlalchemy",
"db": SupportedDatabaseBackends.SNOWFLAKE.value,
"api_version": "v3",
},
driver="snowflake",
)
self.auth_method = SnowflakeAuthMethod.USER_AND_PASSWORD
def verify_libraries_installed(self) -> bool:
return verify_library_dependent_modules(
python_import_name="snowflake.sqlalchemy.snowdialect",
pip_library_name="snowflake-sqlalchemy",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
def prompt(self) -> None:
self.auth_method = _prompt_for_snowflake_auth_method()
def credentials_snippet(self) -> str:
snippet = f"""\
host = "{self.host}" # The account name (include region -- ex 'ABCD.us-east-1')
username = "{self.username}"
database = "" # The database name (optional -- leave blank for none)
schema = "" # The schema name (optional -- leave blank for none)
warehouse = "" # The warehouse name (optional -- leave blank for none)
role = "" # The role name (optional -- leave blank for none)"""
if self.auth_method == SnowflakeAuthMethod.USER_AND_PASSWORD:
snippet += '''
password = "YOUR_PASSWORD"'''
elif self.auth_method == SnowflakeAuthMethod.SSO:
snippet += """
authenticator_url = "externalbrowser" # A valid okta URL or 'externalbrowser' used to connect through SSO"""
elif self.auth_method == SnowflakeAuthMethod.KEY_PAIR:
snippet += """
private_key_path = "YOUR_KEY_PATH" # Path to the private key used for authentication
private_key_passphrase = "" # Passphrase for the private key used for authentication (optional -- leave blank for none)"""
return snippet
def _yaml_innards(self) -> str:
snippet = """
credentials:
host: {host}
username: {username}
database: {database}
query:
schema: {schema}
warehouse: {warehouse}
role: {role}
"""
if self.auth_method == SnowflakeAuthMethod.USER_AND_PASSWORD:
snippet += " password: {password}"
elif self.auth_method == SnowflakeAuthMethod.SSO:
snippet += """\
connect_args:
authenticator: {authenticator_url}"""
elif self.auth_method == SnowflakeAuthMethod.KEY_PAIR:
snippet += """\
private_key_path: {private_key_path}
private_key_passphrase: {private_key_passphrase}"""
return snippet
class BigqueryCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": "sqlalchemy",
"db": "BigQuery",
"api_version": "v3",
},
)
def credentials_snippet(self) -> str:
return '''\
# The SQLAlchemy url/connection string for the BigQuery connection
# (reference: https://github.com/mxmzdlv/pybigquery#connection-string-parameters)"""
connection_string = "YOUR_BIGQUERY_CONNECTION_STRING"'''
def verify_libraries_installed(self) -> bool:
return verify_library_dependent_modules(
python_import_name="pybigquery.sqlalchemy_bigquery",
pip_library_name="pybigquery",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
def _yaml_innards(self) -> str:
return "\n connection_string: {connection_string}"
class ConnectionStringCredentialYamlHelper(SQLCredentialYamlHelper):
def __init__(self, datasource_name: Optional[str]):
super().__init__(
datasource_name=datasource_name,
usage_stats_payload={
"type": "sqlalchemy",
"db": "other",
"api_version": "v3",
},
)
def verify_libraries_installed(self) -> bool:
return True
def credentials_snippet(self) -> str:
return '''\
# The url/connection string for the sqlalchemy connection
# (reference: https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls)
connection_string = "YOUR_CONNECTION_STRING"'''
def _yaml_innards(self) -> str:
return "\n connection_string: {connection_string}"
def _get_sql_yaml_helper_class(
selected_database: SupportedDatabaseBackends, datasource_name: Optional[str]
) -> Union[
MySQLCredentialYamlHelper,
PostgresCredentialYamlHelper,
RedshiftCredentialYamlHelper,
SnowflakeCredentialYamlHelper,
BigqueryCredentialYamlHelper,
ConnectionStringCredentialYamlHelper,
]:
helper_class_by_backend = {
SupportedDatabaseBackends.POSTGRES: PostgresCredentialYamlHelper,
SupportedDatabaseBackends.MYSQL: MySQLCredentialYamlHelper,
SupportedDatabaseBackends.REDSHIFT: RedshiftCredentialYamlHelper,
SupportedDatabaseBackends.SNOWFLAKE: SnowflakeCredentialYamlHelper,
SupportedDatabaseBackends.BIGQUERY: BigqueryCredentialYamlHelper,
SupportedDatabaseBackends.OTHER: ConnectionStringCredentialYamlHelper,
}
helper_class = helper_class_by_backend[selected_database]
return helper_class(datasource_name)
def _prompt_for_execution_engine() -> str:
selection = str(
click.prompt(
"""
What are you processing your files with?
1. Pandas
2. PySpark
""",
type=click.Choice(["1", "2"]),
show_choices=False,
)
)
return selection
def _get_files_helper(
selection: str, context_root_dir: str, datasource_name: Optional[str] = None
) -> Union[PandasYamlHelper, SparkYamlHelper,]:
helper_class_by_selection = {
"1": PandasYamlHelper,
"2": SparkYamlHelper,
}
helper_class = helper_class_by_selection[selection]
return helper_class(context_root_dir, datasource_name)
def _prompt_user_for_database_backend() -> SupportedDatabaseBackends:
enumerated_list = "\n".join(
[f" {i}. {db.value}" for i, db in enumerate(SupportedDatabaseBackends, 1)]
)
msg_prompt_choose_database = f"""
Which database backend are you using?
{enumerated_list}
"""
db_choices = [str(x) for x in list(range(1, 1 + len(SupportedDatabaseBackends)))]
selected_database_index = (
int(
click.prompt(
msg_prompt_choose_database,
type=click.Choice(db_choices),
show_choices=False,
)
)
- 1
) # don't show user a zero index list :)
selected_database = list(SupportedDatabaseBackends)[selected_database_index]
return selected_database
def _prompt_for_snowflake_auth_method() -> SnowflakeAuthMethod:
auth_method = click.prompt(
"""\
What authentication method would you like to use?
1. User and Password
2. Single sign-on (SSO)
3. Key pair authentication
""",
type=click.Choice(["1", "2", "3"]),
show_choices=False,
)
return SnowflakeAuthMethod(int(auth_method) - 1)
def _verify_sqlalchemy_dependent_modules() -> bool:
return verify_library_dependent_modules(
python_import_name="sqlalchemy", pip_library_name="sqlalchemy"
)
def sanitize_yaml_and_save_datasource(
context: DataContext, datasource_yaml: str, overwrite_existing: bool = False
) -> None:
"""A convenience function used in notebooks to help users save secrets."""
if not datasource_yaml:
raise ValueError("Please verify the yaml and try again.")
if not isinstance(datasource_yaml, str):
raise TypeError("Please pass in a valid yaml string.")
config = yaml.load(datasource_yaml)
try:
datasource_name = config.pop("name")
except KeyError:
raise ValueError("The datasource yaml is missing a `name` attribute.")
if not overwrite_existing and check_if_datasource_name_exists(
context=context, datasource_name=datasource_name
):
print(
f'**WARNING** A Datasource named "{datasource_name}" already exists in this Data Context. The Datasource has *not* been saved. Please use a different name or set overwrite_existing=True if you want to overwrite!'
)
return
if "credentials" in config.keys():
credentials = config["credentials"]
config["credentials"] = "${" + datasource_name + "}"
context.save_config_variable(datasource_name, credentials)
context.add_datasource(name=datasource_name, **config)
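# Notebook usage sketch (names are illustrative): once the datasource yaml from one
# of the snippets above has been rendered to a plain string, secrets are moved into
# config_variables.yml and the datasource saved with:
#
#     sanitize_yaml_and_save_datasource(context, datasource_yaml, overwrite_existing=True)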
# TODO it might be nice to hint that remote urls can be entered here!
PROMPT_FILES_BASE_PATH = """
Enter the path of the root directory where the data files are stored. If files are on local disk enter a path relative to your current working directory or an absolute path.
"""
CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES: list = [
# 'great_expectations.datasource.batch_kwargs_generator.query_batch_kwargs_generator',
"great_expectations.datasource.batch_kwargs_generator.table_batch_kwargs_generator",
"great_expectations.dataset.sqlalchemy_dataset",
"great_expectations.validator.validator",
"great_expectations.datasource.sqlalchemy_datasource",
]
def check_if_datasource_name_exists(context: DataContext, datasource_name: str) -> bool:
"""
Check if a Datasource name already exists in the on-disk version of the given DataContext and if so raise an error
Args:
context: DataContext to check for existing Datasource
datasource_name: name of the proposed Datasource
Returns:
boolean True if datasource name exists in on-disk config, else False
"""
# TODO: 20210324 Anthony: Note reading the context from disk is a temporary fix to allow use in a notebook
# after test_yaml_config(). test_yaml_config() should update a copy of the in-memory data context rather than
# making changes directly to the in-memory context.
context_on_disk: DataContext = DataContext(context.root_directory)
return datasource_name in [d["name"] for d in context_on_disk.list_datasources()]
| apache-2.0 |
pratapvardhan/pandas | scripts/find_undoc_args.py | 5 | 5098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script that compares the signature arguments with the ones in the docstring
and returns the differences in plain text or GitHub task list format.
Usage::
$ ./find_undoc_args.py (see arguments below)
"""
from __future__ import print_function
import sys
from collections import namedtuple
import types
import os
import re
import argparse
import inspect
parser = argparse.ArgumentParser(description='Find function arguments that are missing from the docstring.')
parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False,
default=None, action='store',
                    help='full path relative to which paths will be reported')
parser.add_argument('-m', '--module', metavar='MODULE', type=str,
required=True, action='store',
help='name of package to import and examine')
parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,
required=False, default=None, action='store',
help='github project where the code lives, '
'e.g. "pandas-dev/pandas"')
args = parser.parse_args()
Entry = namedtuple('Entry',
'func path lnum undoc_names missing_args '
'nsig_names ndoc_names')
def entry_gen(root_ns, module_name):
"""Walk and yield all methods and functions in the module root_ns and
submodules."""
q = [root_ns]
seen = set()
while q:
ns = q.pop()
for x in dir(ns):
cand = getattr(ns, x)
if (isinstance(cand, types.ModuleType) and
cand.__name__ not in seen and
cand.__name__.startswith(module_name)):
seen.add(cand.__name__)
q.insert(0, cand)
elif (isinstance(cand, (types.MethodType, types.FunctionType)) and
cand not in seen and cand.__doc__):
seen.add(cand)
yield cand
def cmp_docstring_sig(f):
"""Return an `Entry` object describing the differences between the
arguments in the signature and the documented ones."""
def build_loc(f):
path = f.__code__.co_filename.split(args.path, 1)[-1][1:]
return dict(path=path, lnum=f.__code__.co_firstlineno)
sig_names = set(inspect.getargspec(f).args)
# XXX numpydoc can be used to get the list of parameters
doc = f.__doc__.lower()
doc = re.split('^\s*parameters\s*', doc, 1, re.M)[-1]
doc = re.split('^\s*returns*', doc, 1, re.M)[0]
doc_names = {x.split(":")[0].strip() for x in doc.split('\n')
if re.match('\s+[\w_]+\s*:', x)}
sig_names.discard('self')
doc_names.discard('kwds')
doc_names.discard('kwargs')
doc_names.discard('args')
return Entry(func=f, path=build_loc(f)['path'], lnum=build_loc(f)['lnum'],
undoc_names=sig_names.difference(doc_names),
missing_args=doc_names.difference(sig_names),
nsig_names=len(sig_names), ndoc_names=len(doc_names))
def format_id(i):
return i
def format_item_as_github_task_list(i, item, repo):
tmpl = ('- [ ] {id_}) [{fname}:{lnum} ({func_name}())]({link}) - '
'__Missing__[{nmissing}/{total_args}]: {undoc_names}')
link_tmpl = "https://github.com/{repo}/blob/master/{file}#L{lnum}"
link = link_tmpl.format(repo=repo, file=item.path, lnum=item.lnum)
s = tmpl.format(id_=i, fname=item.path, lnum=item.lnum,
func_name=item.func.__name__, link=link,
nmissing=len(item.undoc_names),
total_args=item.nsig_names,
undoc_names=list(item.undoc_names))
if item.missing_args:
s += ' __Extra__(?): %s' % list(item.missing_args)
return s
def format_item_as_plain(i, item):
tmpl = ('+{lnum} {path} {func_name}(): '
'Missing[{nmissing}/{total_args}]={undoc_names}')
s = tmpl.format(path=item.path, lnum=item.lnum,
func_name=item.func.__name__,
nmissing=len(item.undoc_names),
total_args=item.nsig_names,
undoc_names=list(item.undoc_names))
if item.missing_args:
s += ' Extra(?)=%s' % list(item.missing_args)
return s
def main():
module = __import__(args.module)
if not args.path:
args.path = os.path.dirname(module.__file__)
collect = [cmp_docstring_sig(e)
for e in entry_gen(module, module.__name__)]
# only include if there are missing arguments in the docstring
# (fewer false positives) and there are at least some documented arguments
collect = [e for e in collect
if e.undoc_names and len(e.undoc_names) != e.nsig_names]
collect.sort(key=lambda x: x.path)
if args.github_repo:
for i, item in enumerate(collect, 1):
print(format_item_as_github_task_list(i, item, args.github_repo))
else:
for i, item in enumerate(collect, 1):
print(format_item_as_plain(i, item))
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
azogue/enerpi | enerpi/database.py | 1 | 9124 | # -*- coding: utf-8 -*-
"""
ENERPI - Database methods:
- Get/Init/Update ENERPI data catalog
- Process or clear log files
- Method for appending raw data to ENERPI catalog from the ENERPI Logger
...
"""
import datetime as dt
import os
import pandas as pd
from time import time
import re
from enerpi.base import CONFIG, DATA_PATH, log, timeit, SENSORS
from enerpi.catalog import EnerpiCatalog
# Config:
INIT_LOG_MARK = CONFIG.get('ENERPI_SAMPLER', 'INIT_LOG_MARK', fallback='INIT ENERPI')
HDF_STORE = CONFIG.get('ENERPI_DATA', 'HDF_STORE')
HDF_STORE_PATH = os.path.join(DATA_PATH, HDF_STORE)
KEY = CONFIG.get('ENERPI_DATA', 'KEY', fallback='/rms')
CONFIG_CATALOG = dict(raw_file=HDF_STORE,
key_raw_data=KEY,
key_summary_data='/hours',
key_summary_extra='/days',
check_integrity=True,
archive_existent=False,
verbose=False)
def init_catalog(sensors=None, base_path=DATA_PATH, **kwargs):
"""
Get ENERPI data catalog for access & operation with params.
:param sensors: Sensors config object (class EnerpiSamplerConf)
:param base_path: :str: ENERPIDATA base path
:param kwargs: :dict: parameters
:return: :EnerpiCatalog:
"""
conf = CONFIG_CATALOG.copy()
conf.update(base_path=base_path)
if kwargs:
conf.update(**kwargs)
return EnerpiCatalog(sensors=sensors, **conf)
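# Usage sketch (keyword values are illustrative): open the catalog with the configured
# sensors at the default ENERPIDATA location; extra kwargs override the CONFIG_CATALOG
# defaults defined above.
#
#     catalog = init_catalog(sensors=SENSORS, check_integrity=False, verbose=True)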
def _clean_store_path(path_st):
if os.path.pathsep not in path_st:
path_st = os.path.join(DATA_PATH, path_st)
else:
path_st = os.path.abspath(path_st)
if not os.path.splitext(path_st)[1]:
path_st += '.h5'
return path_st
def show_info_data(df, df_consumo=None):
"""
Prints some info about DATA (& opt SUMMARY_DATA)
:param df: :pd.DataFrame: DATA
:param df_consumo: :pd.DataFrame: SUMMARY_DATA
"""
log('DATAFRAME INFO:\n* Head:\n{}'.format(df.head()), 'info', True, False)
log('* Tail:\n{}'.format(df.tail()), 'info', True, False)
log('* Count & types:\n{}'
.format(pd.concat([df.count().rename('N_rows'), df.dtypes.rename('dtypes'), df.describe().drop('count').T],
axis=1)), 'info', True, False)
if df_consumo is not None and not df_consumo.empty:
log('\n** HOURLY ELECTRICITY CONSUMPTION (kWh):\n{}'.format(df_consumo), 'magenta', True, False)
dias = df_consumo.drop(['p_min', 'p_mean', 'p_max'], axis=1).resample('1D').sum()
p_rs = df_consumo[['p_min', 'p_max']].resample('1D')
dias = dias.join(p_rs.p_min.min()).join(p_rs.p_max.max())
dias['t_ref'] /= 24
log('\n*** DAILY ELECTRICITY CONSUMPTION (kWh):\n{}'.format(dias), 'ok', True, False)
def _notify_error_in_save_raw_data(msg_error):
from time import sleep
from enerpi.notifier import push_enerpi_error
t = push_enerpi_error('SAVE RAW DATA', msg_error)
sleep(1)
return t
def save_raw_data(data=None, path_st=HDF_STORE_PATH, catalog=None, verb=True):
"""
Used in a subprocess launched from enerpimeter, this functions appends new *raw data* to the HDF raw store,
and, if data-catalog is not None, updates it.
:param data:
:param path_st:
:param catalog:
:param verb:
:return:
"""
try:
df_tot = None
if data is not None and type(data) is not pd.DataFrame:
data = pd.DataFrame(data, columns=SENSORS.columns_sampling
).set_index(SENSORS.ts_column).dropna().astype(float)
mode = 'a' if os.path.exists(path_st) else 'w'
try:
with pd.HDFStore(path_st,
mode=mode, complevel=9, complib='zlib') as st:
st.append(KEY, data)
if catalog is not None:
df_tot = st[KEY]
log('Size Store: {:.1f} KB, {} rows'.format(os.path.getsize(path_st) / 1000, len(df_tot)),
'debug', verb)
except OSError as e:
msg_error = 'OSError "{}" trying to open "{}" in "{}" mode (save_in_store)'.format(e, path_st, mode)
log(msg_error, 'error', True)
_notify_error_in_save_raw_data(msg_error)
return -1
if catalog is not None:
try:
catalog.update_catalog(data=df_tot)
except Exception as e:
                msg_error = 'Exception "{}" [{}] in update_catalog (save_in_store)'.format(e, e.__class__)
log(msg_error, 'error', True)
_notify_error_in_save_raw_data(msg_error)
return -1
return True
except ValueError as e:
        log('ValueError in save_in_store: {}'.format(e), 'error', True)
return -1
@timeit('get_ts_last_save')
def get_ts_last_save(path_st=HDF_STORE_PATH, get_last_sample=False, verbose=True, n=3):
"""
Returns last data timestamp in hdf store.
:param path_st: :str: hdf store file path
:param get_last_sample: :bool: returns ts or pd.DataFrame
:param verbose: :bool: shows logging msgs in stdout.
:param n: :int: # of tail rows
:return: pd.Timestamp or pd.DataFrame
"""
tic = time()
try:
ts = dt.datetime.fromtimestamp(os.path.getmtime(path_st))
size_kb = os.path.getsize(path_st) / 1000
if get_last_sample:
with pd.HDFStore(path_st, mode='r') as st:
try:
df = st.select(KEY, start=-n)
log('Store UPDATE: {:%c} , SIZE = {:.2f} KB. TOOK {:.3f} s'.format(ts, size_kb, time() - tic),
'debug', verbose)
return df
except KeyError:
log('ERROR: Data "{}" not found in store "{}"'.format(KEY, path_st), 'err', True)
return ts
log('Store UPDATE: {:%c} , SIZE = {:.2f} KB. TOOK {:.3f} s'.format(ts, size_kb, time() - tic), 'debug', verbose)
return ts
except FileNotFoundError:
log('ERROR: Store not found in {}'.format(path_st), 'err', True)
return None
def delete_log_file(log_file, verbose=True):
"""
Removes (logging) file from disk.
:param log_file: :str: logging file path
:param verbose: :bool: shows logging msgs in stdout.
"""
if os.path.exists(log_file) and os.path.isfile(log_file):
log('Deleting LOG FILE in {} ...'.format(log_file), 'warn', verbose, False)
os.remove(log_file)
return True
return False
@timeit('extract_log_file')
def extract_log_file(log_file, extract_temps=True, verbose=True):
"""
Extracts pd.DataFrame from logging file.
:param log_file: :str: logging file path
:param extract_temps: :bool: process RPI temperature logging entries (appends columns 'CPU' & 'GPU')
:param verbose: :bool: shows logging msgs in stdout.
:return: time-indexed pd.DataFrame
"""
if os.path.exists(log_file):
rg_log_msg = re.compile('(?P<tipo>INFO|WARNING|DEBUG|ERROR) \[(?P<func>.+?)\] '
'- (?P<ts>\d{1,2}/\d\d/\d\d\d\d \d\d:\d\d:\d\d): (?P<msg>.*?)\n', re.DOTALL)
with open(log_file, 'r') as log_f:
df_log = pd.DataFrame(rg_log_msg.findall(log_f.read()),
columns=['tipo', 'func', 'ts', 'msg'])
df_log.drop('func', axis=1, inplace=True)
df_log['tipo'] = df_log['tipo'].astype('category')
df_log['ts'] = df_log['ts'].apply(lambda x: dt.datetime.strptime(x, '%d/%m/%Y %H:%M:%S'))
df_log.loc[df_log.msg.str.startswith('Tªs --> '), 'temp'] = True
df_log.loc[df_log.msg.str.startswith('SENDED: '), 'debug_send'] = True
b_warn = df_log['tipo'] == 'WARNING'
df_log.loc[b_warn, 'no_red'] = df_log[b_warn].msg.str.startswith('OSError: [Errno 101]; C_UNREACHABLE:')
# df_log.loc[b_warn, 'no_red'] = df_log[b_warn].msg.str.startswith('OSError: La red es inaccesible')
df_log['exec'] = df_log['msg'].str.contains(INIT_LOG_MARK).cumsum().astype(int)
df_log = df_log.set_index('ts')
if extract_temps:
rg_temps = 'Tªs --> (?P<CPU>\d{1,2}\.\d) / (?P<GPU>\d{1,2}\.\d) ºC'
df_log = df_log.join(df_log[df_log['temp'].notnull()].msg.str.extract(rg_temps, expand=True).astype(float))
if verbose:
clasific = df_log.groupby(['exec', 'tipo']).count().dropna(how='all').astype(int)
log(clasific, 'ok', True, False)
conteo_tipos = df_log.groupby('tipo').count()
if 'ERROR' in conteo_tipos.index:
log(df_log[df_log.tipo == 'ERROR'].dropna(how='all', axis=1), 'error', True, False)
if 'INFO' in conteo_tipos.index:
log(df_log[df_log.tipo == 'INFO'].dropna(how='all', axis=1), 'info', True, False)
return df_log
else:
log("extract_log_file: '{}' doesn't exists".format(log_file), 'error', verbose, False)
return pd.DataFrame([], columns=['tipo', 'func', 'ts', 'msg'])
| mit |
DavidJahn86/terapy | spectroComparism.py | 1 | 7513 | #!/usr/bin/env python
'''
SNR plots in FD & TD, dynamic range (DR) plots in FD & TD, and TD signals with uncertainty for reference files from two different spectrometers/runs
'''
import argparse
import sys
import matplotlib.pyplot as plt
import glob
import Terapy
import TeraData
from matplotlib2tikz import save as tikz_save
plt.rc('text',usetex=True)
parser = argparse.ArgumentParser(description='Do a spectrometer comparison')
parser.add_argument('--outname','-o',nargs='?',type=str,help='prefix output filenames')
parser.add_argument('--ireference1','-ir1',nargs='*',help='list of reference filenames, dataset 1')
parser.add_argument('--ireference2','-ir2',nargs='*',help='list of reference filenames, dataset 2')
parser.add_argument('--mode1','-m1',type=str,default='INRIM',choices=['INRIM','Marburg','lucastestformat'],help='format of the datafiles of dataset 1')
parser.add_argument('--mode2','-m2',type=str,default='INRIM',choices=['INRIM','Marburg','lucastestformat'],help='format of the datafiles of dataset 2')
parser.add_argument('--workpath','-w',type=str,default='',help='specify a base folder')
args = parser.parse_args()
ireffiles1=args.ireference1
ireffiles2=args.ireference2
mode1=args.mode1
mode2=args.mode2
basefolder=args.workpath
reffiles1=[]
reffiles2=[]
for i in range(len(ireffiles1)):
tf=glob.glob(basefolder+ireffiles1[i])
reffiles1+=tf
for i in range(len(ireffiles2)):
tf=glob.glob(basefolder+ireffiles2[i])
reffiles2+=tf
if len(reffiles1)==0:
print "no Reference File for spectrometer one specified"
sys.exit()
if len(reffiles2)==0:
print "no Reference File for spectrometer two specified"
sys.exit()
if mode1=='lucastestformat':
reftd1=TeraData.THzTdData(reffiles1)
elif mode1=='Marburg':
reftd1=TeraData.ImportMarburgData(reffiles1)
elif mode1=='INRIM':
reftd1=TeraData.ImportInrimData(reffiles1)
if mode2=='lucastestformat':
reftd2=TeraData.THzTdData(reffiles2)
elif mode2=='Marburg':
reftd2=TeraData.ImportMarburgData(reffiles2)
elif mode2=='INRIM':
reftd2=TeraData.ImportInrimData(reffiles2)
# #initialize the fd_data objects
ref_fd1=TeraData.FdData(reftd1)
ref_fd2=TeraData.FdData(reftd2)
#shift to same time!
reftd1.tdData[:,0]-=reftd1.getPeakPosition()
reftd2.tdData[:,0]-=reftd2.getPeakPosition()
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax.set_xlabel('frequency (GHz)')
ax.set_ylabel('SNR')
ax.grid(True)
ax.semilogy(ref_fd1.getfreqsGHz(),ref_fd1.getSNR(),ref_fd2.getfreqsGHz(),ref_fd2.getSNR())
ax.legend((mode1, mode2))
plt.title('SNR')
#fig2 = plt.figure()
ax2 = fig.add_subplot(2,1,2)
ax2.set_xlabel('time (ps)')
ax2.set_ylabel('SNR')
ax2.grid(True)
ax2.semilogy(reftd1.getTimesPs(),reftd1.getSNR(),reftd2.getTimesPs(),reftd2.getSNR())
ax2.legend((mode1, mode2))
fig.savefig(basefolder+'SNR-Plot.png')
tikz_save(basefolder+'SNR-Plot.tikz',figureheight='\\figureheight',figurewidth='\\figurewidth')
#plt.title('SNR')
fig1 = plt.figure()
ax1 = fig1.add_subplot(2,1,1)
ax1.set_xlabel('frequency (GHz)')
ax1.set_ylabel('dynamic range')
ax1.grid(True)
ax1.semilogy(ref_fd1.getfreqsGHz(),ref_fd1.getDR(),ref_fd2.getfreqsGHz(),ref_fd2.getDR())
ax1.legend((mode1,mode2))
plt.title('dynamic range')
#fig3 = plt.figure()
ax3 = fig1.add_subplot(2,1,2)
ax3.set_xlabel('time (ps)')
ax3.set_ylabel('dynamic range')
ax3.grid(True)
ax3.semilogy(reftd1.getTimesPs(),reftd1.getDR(),reftd2.getTimesPs(),reftd2.getDR())
ax3.legend((mode1,mode2))
#plt.title('dynamic range')
fig1.savefig(basefolder+'DR-Plot.png')
tikz_save(basefolder+'DR-Plot.tikz',figureheight='\\figureheight',figurewidth='\\figurewidth')
fig2 = plt.figure()
ax4 = fig2.add_subplot(2,1,1)
ax4.set_xlabel('time (ps)')
ax4.set_ylabel('X channel (V)')
#ax4.grid(True)
no_std=2
ax4.plot(reftd1.getTimesPs(),reftd1.getEX(),\
reftd1.getTimesPs(),reftd1.getEX() + no_std*reftd1.getUncEX(),'g--',\
reftd1.getTimesPs(),reftd1.getEX() - no_std*reftd1.getUncEX(),'g--')
#ax4.legend(('ref'))
plt.title('Reference spectrometer ' + mode1+ ' with uncertainty')
#fig5 = plt.figure()
ax5 = fig2.add_subplot(2,1,2)
ax5.set_xlabel('time (ps)')
ax5.set_ylabel('X channel (V)')
#ax4.grid(True)
no_std=2
ax5.plot(reftd2.getTimesPs(),reftd2.getEX(),\
reftd2.getTimesPs(),reftd2.getEX() + no_std*reftd2.getUncEX(),'g--',\
reftd2.getTimesPs(),reftd2.getEX() - no_std*reftd2.getUncEX(),'g--')
#ax5.legend(('sam'))
plt.title('Reference spectrometer ' + mode2+ ' with uncertainty')
fig2.savefig(basefolder+'TDSignal-Plot.png')
tikz_save(basefolder+'TDSignal-Plot.tikz',figureheight='\\figureheight',figurewidth='\\figurewidth')
fig3 = plt.figure()
ax6 = fig3.add_subplot(2,1,1)
ax6.set_xlabel('frequency (GHz)')
ax6.set_ylabel('dynamic range')
ax6.grid(True)
ax6.semilogy(ref_fd1.getfreqsGHz(),ref_fd1.getFAbs(),\
ref_fd1.getfreqsGHz(), ref_fd1.getFAbs() + ref_fd1.getFAbsUnc(), 'g--',\
ref_fd1.getfreqsGHz(), ref_fd1.getFAbs() - ref_fd1.getFAbsUnc(), 'g--',
ref_fd2.getfreqsGHz(),ref_fd2.getFAbs(),\
ref_fd2.getfreqsGHz(), ref_fd2.getFAbs() + ref_fd2.getFAbsUnc(), 'g--',\
ref_fd2.getfreqsGHz(), ref_fd2.getFAbs() - ref_fd2.getFAbsUnc(), 'g--')
#ax6.legend(('ref'))
plt.title('ABS with U')
ax7 = fig3.add_subplot(2,1,2)
ax7.set_xlabel('frequency (GHz)')
ax7.set_ylabel('dynamic range')
ax7.grid(True)
ax7.plot(ref_fd1.getfreqsGHz(),ref_fd1.getFPh(),\
ref_fd1.getfreqsGHz(), ref_fd1.getFPh() + ref_fd1.getFPhUnc(), 'g--',\
ref_fd1.getfreqsGHz(), ref_fd1.getFPh() - ref_fd1.getFPhUnc(), 'g--',
ref_fd2.getfreqsGHz(),ref_fd2.getFPh(),\
ref_fd2.getfreqsGHz(), ref_fd2.getFPh() + ref_fd2.getFPhUnc(), 'g--',\
ref_fd2.getfreqsGHz(), ref_fd2.getFPh() - ref_fd2.getFPhUnc(), 'g--')
fig3.savefig(basefolder+'FDSignal.png')
tikz_save(basefolder+'FDSignal.tikz',figureheight='\\figureheight',figurewidth='\\figurewidth')
#ax7.legend(('ref'))
plt.title('PHASE with U')
fd = file( basefolder + 'SignalInfo.log', 'w')
fd.write('max DR in FD - ref1\t' + str(max(ref_fd1.getDR())) + '\n'\
'max DR in FD - ref2\t' + str(max(ref_fd2.getDR())) + '\n'\
'max DR in TD - ref1\t' + str(max(reftd1.getDR())) + '\n'\
'max DR in TD - ref2\t' + str(max(reftd2.getDR())) + '\n\n'\
'max SNR in FD - ref1\t' + str(max(ref_fd1.getSNR())) + '\n'\
'max SNR in FD - ref2\t' + str(max(ref_fd2.getSNR())) + '\n'\
'max SNR in TD - ref1\t' + str(max(reftd1.getSNR())) + '\n'\
'max SNR in TD - ref2\t' + str(max(reftd2.getSNR())) + '\n')
fd.close()
''' maxDR of fourier and timedomain data
max SNR of fourier and timedomain data
Bandwidth, highest and lowest accesible frequency
'''
'''if args.outname==None:
args.outname=myana.getFilenameSuggestion()
args.outname+='_'
if args.savePlots:
pylab.ioff()
reftd.doPlotWithunc()
samtd.doPlotWithunc()
pylab.legend(('Reference','Sample'))
pylab.savefig(args.workpath+args.outname + 'Time-Domain.png')
pylab.close()
ref_fd.doPlot()
sam_fd.doPlot()
pylab.figure('FD-ABS-Plot')
pylab.legend(('Reference','Sample'))
pylab.savefig(args.outname + 'ABS-Frequency-Domain.png')
pylab.close()
pylab.figure('FD-PHASE-Plot')
pylab.legend(('Reference','Sample'))
pylab.savefig(args.workpath+args.outname + 'PHASE-Frequency-Domain.png')
pylab.close()
mdata.doPlot()
pylab.savefig(args.workpath+args.outname + 'TransferFunction.png')
pylab.close()
#
myana.plotRefractiveIndex(1,1,args.workpath+args.outname)
myana.saveResults(args.workpath+args.outname)
#'''
plt.show()
| gpl-2.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py | 23 | 1485 | #===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
| gpl-3.0 |
anntzer/scikit-learn | sklearn/neural_network/_multilayer_perceptron.py | 8 | 56999 | """Multi-layer Perceptron
"""
# Authors: Issam H. Laradji <[email protected]>
# Andreas Mueller
# Jiyuan Qian
# License: BSD 3 clause
import numpy as np
from abc import ABCMeta, abstractmethod
import warnings
import scipy.optimize
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..base import is_classifier
from ._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from ._stochastic_optimizers import SGDOptimizer, AdamOptimizer
from ..model_selection import train_test_split
from ..preprocessing import LabelBinarizer
from ..utils import gen_batches, check_random_state
from ..utils import shuffle
from ..utils import _safe_indexing
from ..utils import column_or_1d
from ..exceptions import ConvergenceWarning
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..utils.multiclass import _check_partial_fit_first_call, unique_labels
from ..utils.multiclass import type_of_target
from ..utils.optimize import _check_optimize_result
_STOCHASTIC_SOLVERS = ['sgd', 'adam']
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
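# Note: the inverse operation is ``BaseMultilayerPerceptron._unpack`` below, which
# uses the ``_coef_indptr`` / ``_intercept_indptr`` bookkeeping to restore the
# per-layer shapes from this flat vector.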
class BaseMultilayerPerceptron(BaseEstimator, metaclass=ABCMeta):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
@abstractmethod
def __init__(self, hidden_layer_sizes, activation, solver,
alpha, batch_size, learning_rate, learning_rate_init, power_t,
max_iter, loss, shuffle, random_state, tol, verbose,
warm_start, momentum, nesterovs_momentum, early_stopping,
validation_fraction, beta_1, beta_2, epsilon,
n_iter_no_change, max_fun):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.n_iter_no_change = n_iter_no_change
self.max_fun = max_fun
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i],
self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activations[i + 1])
return activations
def _forward_pass_fast(self, X):
"""Predict using the trained model
This is the same as _forward_pass but does not record the activations
of all layers and only returns the last layer's activation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
X = self._validate_data(X, accept_sparse=['csr', 'csc'], reset=False)
# Initialize first layer
activation = X
# Forward propagate
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
return activation
def _compute_loss_grad(self, layer, n_samples, activations, deltas,
coef_grads, intercept_grads):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T,
deltas[layer])
coef_grads[layer] += (self.alpha * self.coefs_[layer])
coef_grads[layer] /= n_samples
intercept_grads[layer] = np.mean(deltas[layer], 0)
def _loss_grad_lbfgs(self, packed_coef_inter, X, y, activations, deltas,
coef_grads, intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
        Returned gradients are packed in a single vector so that they can be
        used in L-BFGS.
Parameters
----------
packed_coef_inter : ndarray
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
        activations : list, length = n_layers
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
        grad : array-like, shape (number of parameters of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, activations, deltas, coef_grads, intercept_grads)
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(self, X, y, activations, deltas, coef_grads,
intercept_grads):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
        activations : list, length = n_layers
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss_func_name = self.loss
if loss_func_name == 'log_loss' and self.out_activation_ == 'logistic':
loss_func_name = 'binary_log_loss'
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1])
# Add L2 regularization term to loss
values = 0
for s in self.coefs_:
s = s.ravel()
values += np.dot(s, s)
loss += (0.5 * self.alpha) * values / n_samples
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] here works with following
# combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
deltas[last] = activations[-1] - y
# Compute gradient for the last layer
self._compute_loss_grad(
last, n_samples, activations, deltas, coef_grads, intercept_grads)
inplace_derivative = DERIVATIVES[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 2, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative(activations[i], deltas[i - 1])
self._compute_loss_grad(
i - 1, n_samples, activations, deltas, coef_grads,
intercept_grads)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units, dtype):
# set all attributes, allocate weights etc for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not is_classifier(self):
self.out_activation_ = 'identity'
# Output for multi class
elif self._label_binarizer.y_type_ == 'multiclass':
self.out_activation_ = 'softmax'
# Output for binary class and multi-label
else:
self.out_activation_ = 'logistic'
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
coef_init, intercept_init = self._init_coef(layer_units[i],
layer_units[i + 1],
dtype)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
if self.solver in _STOCHASTIC_SOLVERS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
else:
self.best_loss_ = np.inf
def _init_coef(self, fan_in, fan_out, dtype):
# Use the initialization method recommended by
# Glorot et al.
factor = 6.
if self.activation == 'logistic':
factor = 2.
init_bound = np.sqrt(factor / (fan_in + fan_out))
# Generate weights and bias:
coef_init = self._random_state.uniform(-init_bound, init_bound,
(fan_in, fan_out))
intercept_init = self._random_state.uniform(-init_bound, init_bound,
fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return coef_init, intercept_init
def _fit(self, X, y, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
# Validate input parameters.
self._validate_hyperparameters()
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError("hidden_layer_sizes must be > 0, got %s." %
hidden_layer_sizes)
first_pass = (not hasattr(self, 'coefs_') or
(not self.warm_start and not incremental))
X, y = self._validate_input(X, y, incremental, reset=first_pass)
n_samples, n_features = X.shape
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = ([n_features] + hidden_layer_sizes +
[self.n_outputs_])
# check random state
self._random_state = check_random_state(self.random_state)
if first_pass:
# First time training the model
self._initialize(y, layer_units, X.dtype)
# Initialize lists
activations = [X] + [None] * (len(layer_units) - 1)
deltas = [None] * (len(activations) - 1)
coef_grads = [np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
for n_fan_in_,
n_fan_out_ in zip(layer_units[:-1],
layer_units[1:])]
intercept_grads = [np.empty(n_fan_out_, dtype=X.dtype)
for n_fan_out_ in
layer_units[1:]]
# Run the Stochastic optimization solver
if self.solver in _STOCHASTIC_SOLVERS:
self._fit_stochastic(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental)
# Run the LBFGS solver
elif self.solver == 'lbfgs':
self._fit_lbfgs(X, y, activations, deltas, coef_grads,
intercept_grads, layer_units)
return self
def _validate_hyperparameters(self):
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False, got %s." %
self.shuffle)
if self.max_iter <= 0:
raise ValueError("max_iter must be > 0, got %s." % self.max_iter)
if self.max_fun <= 0:
raise ValueError("max_fun must be > 0, got %s." % self.max_fun)
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0, got %s." % self.alpha)
if (self.learning_rate in ["constant", "invscaling", "adaptive"] and
self.learning_rate_init <= 0.0):
raise ValueError("learning_rate_init must be > 0, got %s." %
                             self.learning_rate_init)
if self.momentum > 1 or self.momentum < 0:
raise ValueError("momentum must be >= 0 and <= 1, got %s" %
self.momentum)
if not isinstance(self.nesterovs_momentum, bool):
raise ValueError("nesterovs_momentum must be either True or False,"
" got %s." % self.nesterovs_momentum)
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False,"
" got %s." % self.early_stopping)
if self.validation_fraction < 0 or self.validation_fraction >= 1:
raise ValueError("validation_fraction must be >= 0 and < 1, "
"got %s" % self.validation_fraction)
if self.beta_1 < 0 or self.beta_1 >= 1:
raise ValueError("beta_1 must be >= 0 and < 1, got %s" %
self.beta_1)
if self.beta_2 < 0 or self.beta_2 >= 1:
raise ValueError("beta_2 must be >= 0 and < 1, got %s" %
self.beta_2)
if self.epsilon <= 0.0:
raise ValueError("epsilon must be > 0, got %s." % self.epsilon)
if self.n_iter_no_change <= 0:
raise ValueError("n_iter_no_change must be > 0, got %s."
% self.n_iter_no_change)
# raise ValueError if not registered
if self.activation not in ACTIVATIONS:
raise ValueError("The activation '%s' is not supported. Supported "
"activations are %s."
% (self.activation, list(sorted(ACTIVATIONS))))
if self.learning_rate not in ["constant", "invscaling", "adaptive"]:
raise ValueError("learning rate %s is not supported. " %
self.learning_rate)
supported_solvers = _STOCHASTIC_SOLVERS + ["lbfgs"]
if self.solver not in supported_solvers:
raise ValueError("The solver %s is not supported. "
" Expected one of: %s" %
(self.solver, ", ".join(supported_solvers)))
def _fit_lbfgs(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_,
self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(
self._loss_grad_lbfgs, packed_coef_inter,
method="L-BFGS-B", jac=True,
options={
"maxfun": self.max_fun,
"maxiter": self.max_iter,
"iprint": iprint,
"gtol": self.tol
},
args=(X, y, activations, deltas, coef_grads, intercept_grads))
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.loss_ = opt_res.fun
self._unpack(opt_res.x)
def _fit_stochastic(self, X, y, activations, deltas, coef_grads,
intercept_grads, layer_units, incremental):
if not incremental or not hasattr(self, '_optimizer'):
params = self.coefs_ + self.intercepts_
if self.solver == 'sgd':
self._optimizer = SGDOptimizer(
params, self.learning_rate_init, self.learning_rate,
self.momentum, self.nesterovs_momentum, self.power_t)
elif self.solver == 'adam':
self._optimizer = AdamOptimizer(
params, self.learning_rate_init, self.beta_1, self.beta_2,
self.epsilon)
# early_stopping in partial_fit doesn't make sense
early_stopping = self.early_stopping and not incremental
if early_stopping:
# don't stratify in multilabel classification
should_stratify = is_classifier(self) and self.n_outputs_ == 1
stratify = y if should_stratify else None
X, X_val, y, y_val = train_test_split(
X, y, random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify)
if is_classifier(self):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_val = None
y_val = None
n_samples = X.shape[0]
sample_idx = np.arange(n_samples, dtype=int)
if self.batch_size == 'auto':
batch_size = min(200, n_samples)
else:
if self.batch_size < 1 or self.batch_size > n_samples:
warnings.warn("Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped")
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
for it in range(self.max_iter):
if self.shuffle:
# Only shuffle the sample indices instead of X and y to
# reduce the memory footprint. These indices will be used
# to slice the X and y.
sample_idx = shuffle(sample_idx,
random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
if self.shuffle:
X_batch = _safe_indexing(X, sample_idx[batch_slice])
y_batch = y[sample_idx[batch_slice]]
else:
X_batch = X[batch_slice]
y_batch = y[batch_slice]
activations[0] = X_batch
batch_loss, coef_grads, intercept_grads = self._backprop(
X_batch, y_batch, activations, deltas,
coef_grads, intercept_grads)
accumulated_loss += batch_loss * (batch_slice.stop -
batch_slice.start)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_,
self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(early_stopping, X_val, y_val)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > self.n_iter_no_change:
# not better than last `n_iter_no_change` iterations by tol
# stop or decrease learning rate
if early_stopping:
msg = ("Validation score did not improve more than "
"tol=%f for %d consecutive epochs." % (
self.tol, self.n_iter_no_change))
else:
msg = ("Training loss did not improve more than tol=%f"
" for %d consecutive epochs." % (
self.tol, self.n_iter_no_change))
is_stopping = self._optimizer.trigger_stopping(
msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn(
"Stochastic Optimizer: Maximum iterations (%d) "
"reached and the optimization hasn't converged yet."
% self.max_iter, ConvergenceWarning)
except KeyboardInterrupt:
warnings.warn("Training interrupted by user.")
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X_val, y_val):
if early_stopping:
# compute validation score, use that for stopping
self.validation_scores_.append(self.score(X_val, y_val))
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ +
self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy()
for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
def fit(self, X, y):
"""Fit the model to data matrix X and target(s) y.
Parameters
----------
X : ndarray or sparse matrix of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : returns a trained MLP model.
"""
return self._fit(X, y, incremental=False)
@property
def partial_fit(self):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
Returns
-------
self : returns a trained MLP model.
"""
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError("partial_fit is only available for stochastic"
" optimizers. %s is not stochastic."
% self.solver)
return self._partial_fit
def _partial_fit(self, X, y):
return self._fit(X, y, incremental=True)
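# Illustration (an added sketch, not part of scikit-learn): a self-contained
# numpy version of the forward pass and of the output-layer delta used in
# _backprop, for one relu hidden layer with a softmax output and cross-entropy
# loss. All names and shapes below are hypothetical toy values; the estimator
# itself keeps this state in coefs_ and intercepts_.
def _forward_backward_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)                            # 4 samples, 3 features
    Y = np.eye(2)[[0, 1, 1, 0]]                    # one-hot targets, 2 classes
    W1, b1 = rng.randn(3, 5), np.zeros(5)          # hidden layer parameters
    W2, b2 = rng.randn(5, 2), np.zeros(2)          # output layer parameters
    H = np.maximum(np.dot(X, W1) + b1, 0)          # relu hidden activations
    Z = np.dot(H, W2) + b2
    P = np.exp(Z - Z.max(axis=1, keepdims=True))
    P /= P.sum(axis=1, keepdims=True)              # softmax output
    delta_out = P - Y                              # gradient of the loss w.r.t. Z
    # matches _compute_loss_grad for the last layer, up to the L2 term
    grad_W2 = np.dot(H.T, delta_out) / X.shape[0]
    return grad_W2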
class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic
gradient descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
by Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
L2 penalty (regularization term) parameter.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate at each
time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when ``solver='sgd'``.
learning_rate_init : double, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least tol for
``n_iter_no_change`` consecutive epochs. The split is stratified,
except in a multilabel setting.
Only effective when solver='sgd' or 'adam'
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of loss function calls.
The solver iterates until convergence (determined by 'tol'), number
of iterations reaches max_iter, or this number of loss function calls.
Note that number of loss function calls will be greater than or equal
to the number of iterations for the `MLPClassifier`.
.. versionadded:: 0.22
Attributes
----------
classes_ : ndarray or list of ndarray of shape (n_classes,)
Class labels for each output.
loss_ : float
The current loss computed with the loss function.
best_loss_ : float
The minimum loss reached by the solver throughout fitting.
loss_curve_ : list of shape (`n_iter_`,)
The ith element in the list represents the loss at the ith iteration.
t_ : int
The number of training samples seen by the solver during fitting.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int
        The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
Examples
--------
>>> from sklearn.neural_network import MLPClassifier
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_classification(n_samples=100, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
... random_state=1)
>>> clf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)
>>> clf.predict_proba(X_test[:1])
array([[0.038..., 0.961...]])
>>> clf.predict(X_test[:5, :])
array([1, 0, 1, 0, 1])
>>> clf.score(X_test, y_test)
0.8...
Notes
-----
MLPClassifier trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
This implementation works with data represented as dense numpy arrays or
sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
@_deprecate_positional_args
def __init__(self, hidden_layer_sizes=(100,), activation="relu", *,
solver='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001, power_t=0.5, max_iter=200,
shuffle=True, random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, n_iter_no_change=10, max_fun=15000):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation, solver=solver, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='log_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
n_iter_no_change=n_iter_no_change, max_fun=max_fun)
def _validate_input(self, X, y, incremental, reset):
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],
multi_output=True,
dtype=(np.float64, np.float32),
reset=reset)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
# Matrix of actions to be taken under the possible combinations:
# The case that incremental == True and classes_ not defined is
# already checked by _check_partial_fit_first_call that is called
# in _partial_fit below.
# The cases are already grouped into the respective if blocks below.
#
# incremental warm_start classes_ def action
# 0 0 0 define classes_
# 0 1 0 define classes_
# 0 0 1 redefine classes_
#
# 0 1 1 check compat warm_start
# 1 1 1 check compat warm_start
#
# 1 0 1 check compat last fit
#
# Note the reliance on short-circuiting here, so that the second
# or part implies that classes_ is defined.
if (
(not hasattr(self, "classes_")) or
(not self.warm_start and not incremental)
):
self._label_binarizer = LabelBinarizer()
self._label_binarizer.fit(y)
self.classes_ = self._label_binarizer.classes_
else:
classes = unique_labels(y)
if self.warm_start:
if set(classes) != set(self.classes_):
raise ValueError(
f"warm_start can only be used where `y` has the same "
f"classes as in the previous call to fit. Previously "
f"got {self.classes_}, `y` has {classes}"
)
elif len(np.setdiff1d(classes, self.classes_, assume_unique=True)):
raise ValueError(
f"`y` has classes not in `self.classes_`. "
f"`self.classes_` has {self.classes_}. 'y' has {classes}."
)
# This downcast to bool is to prevent upcasting when working with
# float32 data
y = self._label_binarizer.transform(y).astype(bool)
return X, y
def predict(self, X):
"""Predict using the multi-layer perceptron classifier
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y : ndarray, shape (n_samples,) or (n_samples, n_classes)
The predicted classes.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
return self._label_binarizer.inverse_transform(y_pred)
@property
def partial_fit(self):
"""Update the model with a single iteration over the given data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : array-like of shape (n_samples,)
The target values.
classes : array of shape (n_classes,), default=None
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns a trained MLP model.
"""
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError("partial_fit is only available for stochastic"
" optimizer. %s is not stochastic"
% self.solver)
return self._partial_fit
def _partial_fit(self, X, y, classes=None):
if _check_partial_fit_first_call(self, classes):
self._label_binarizer = LabelBinarizer()
if type_of_target(y).startswith('multilabel'):
self._label_binarizer.fit(y)
else:
self._label_binarizer.fit(classes)
super()._partial_fit(X, y)
return self
def predict_log_proba(self, X):
"""Return the log of probability estimates.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The input data.
Returns
-------
log_y_prob : ndarray of shape (n_samples, n_classes)
The predicted log-probability of the sample for each class
in the model, where classes are ordered as they are in
`self.classes_`. Equivalent to log(predict_proba(X))
"""
y_prob = self.predict_proba(X)
return np.log(y_prob, out=y_prob)
def predict_proba(self, X):
"""Probability estimates.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
y_prob : ndarray of shape (n_samples, n_classes)
The predicted probability of the sample for each class in the
model, where classes are ordered as they are in `self.classes_`.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if self.n_outputs_ == 1:
y_pred = y_pred.ravel()
if y_pred.ndim == 1:
return np.vstack([1 - y_pred, y_pred]).T
else:
return y_pred
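# Usage sketch (an added example, not part of scikit-learn): incremental
# training with partial_fit, which needs the full set of classes on the first
# call. The data, sizes and chunking below are made up for illustration.
def _mlp_classifier_partial_fit_demo():
    rng = np.random.RandomState(0)
    X, y = rng.randn(100, 20), rng.randint(0, 3, 100)
    clf = MLPClassifier(hidden_layer_sizes=(10,), solver='sgd', random_state=0)
    for start in range(0, 100, 25):                # feed the data in four chunks
        clf.partial_fit(X[start:start + 25], y[start:start + 25],
                        classes=[0, 1, 2])
    return clf.predict_proba(X[:2])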
class MLPRegressor(RegressorMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron regressor.
This model optimizes the squared-loss using LBFGS or stochastic gradient
descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : tuple, length = n_layers - 2, default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed by
Kingma, Diederik, and Jimmy Ba
Note: The default solver 'adam' works pretty well on relatively
large datasets (with thousands of training samples or more) in terms of
both training time and validation score.
For small datasets, however, 'lbfgs' can converge faster and perform
better.
alpha : float, default=0.0001
L2 penalty (regularization term) parameter.
batch_size : int, default='auto'
Size of minibatches for stochastic optimizers.
If the solver is 'lbfgs', the classifier will not use minibatch.
When set to "auto", `batch_size=min(200, n_samples)`
learning_rate : {'constant', 'invscaling', 'adaptive'}, default='constant'
Learning rate schedule for weight updates.
- 'constant' is a constant learning rate given by
'learning_rate_init'.
- 'invscaling' gradually decreases the learning rate ``learning_rate_``
at each time step 't' using an inverse scaling exponent of 'power_t'.
effective_learning_rate = learning_rate_init / pow(t, power_t)
- 'adaptive' keeps the learning rate constant to
'learning_rate_init' as long as training loss keeps decreasing.
Each time two consecutive epochs fail to decrease training loss by at
least tol, or fail to increase validation score by at least tol if
'early_stopping' is on, the current learning rate is divided by 5.
Only used when solver='sgd'.
learning_rate_init : double, default=0.001
The initial learning rate used. It controls the step-size
in updating the weights. Only used when solver='sgd' or 'adam'.
power_t : double, default=0.5
The exponent for inverse scaling learning rate.
It is used in updating effective learning rate when the learning_rate
is set to 'invscaling'. Only used when solver='sgd'.
max_iter : int, default=200
Maximum number of iterations. The solver iterates until convergence
(determined by 'tol') or this number of iterations. For stochastic
solvers ('sgd', 'adam'), note that this determines the number of epochs
(how many times each data point will be used), not the number of
gradient steps.
shuffle : bool, default=True
Whether to shuffle samples in each iteration. Only used when
solver='sgd' or 'adam'.
random_state : int, RandomState instance, default=None
Determines random number generation for weights and bias
initialization, train-test split if early stopping is used, and batch
sampling when solver='sgd' or 'adam'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
tol : float, default=1e-4
Tolerance for the optimization. When the loss or score is not improving
by at least ``tol`` for ``n_iter_no_change`` consecutive iterations,
unless ``learning_rate`` is set to 'adaptive', convergence is
considered to be reached and training stops.
verbose : bool, default=False
Whether to print progress messages to stdout.
warm_start : bool, default=False
When set to True, reuse the solution of the previous
call to fit as initialization, otherwise, just erase the
previous solution. See :term:`the Glossary <warm_start>`.
momentum : float, default=0.9
Momentum for gradient descent update. Should be between 0 and 1. Only
used when solver='sgd'.
nesterovs_momentum : bool, default=True
Whether to use Nesterov's momentum. Only used when solver='sgd' and
momentum > 0.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to true, it will automatically set
aside 10% of training data as validation and terminate training when
validation score is not improving by at least ``tol`` for
``n_iter_no_change`` consecutive epochs.
Only effective when solver='sgd' or 'adam'
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector in adam,
should be in [0, 1). Only used when solver='adam'
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector in adam,
should be in [0, 1). Only used when solver='adam'
epsilon : float, default=1e-8
Value for numerical stability in adam. Only used when solver='adam'
n_iter_no_change : int, default=10
Maximum number of epochs to not meet ``tol`` improvement.
Only effective when solver='sgd' or 'adam'
.. versionadded:: 0.20
max_fun : int, default=15000
Only used when solver='lbfgs'. Maximum number of function calls.
The solver iterates until convergence (determined by 'tol'), number
of iterations reaches max_iter, or this number of function calls.
Note that number of function calls will be greater than or equal to
the number of iterations for the MLPRegressor.
.. versionadded:: 0.22
Attributes
----------
loss_ : float
The current loss computed with the loss function.
best_loss_ : float
The minimum loss reached by the solver throughout fitting.
loss_curve_ : list of shape (`n_iter_`,)
The ith element in the list represents the loss at the ith iteration.
t_ : int
        The number of training samples seen by the solver during fitting.
        Mathematically it equals `n_iters * X.shape[0]` and is used as the
        time step by the optimizer's learning rate scheduler.
coefs_ : list of shape (n_layers - 1,)
The ith element in the list represents the weight matrix corresponding
to layer i.
intercepts_ : list of shape (n_layers - 1,)
The ith element in the list represents the bias vector corresponding to
layer i + 1.
n_iter_ : int
        The number of iterations the solver has run.
n_layers_ : int
Number of layers.
n_outputs_ : int
Number of outputs.
out_activation_ : str
Name of the output activation function.
Examples
--------
>>> from sklearn.neural_network import MLPRegressor
>>> from sklearn.datasets import make_regression
>>> from sklearn.model_selection import train_test_split
>>> X, y = make_regression(n_samples=200, random_state=1)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=1)
>>> regr = MLPRegressor(random_state=1, max_iter=500).fit(X_train, y_train)
>>> regr.predict(X_test[:2])
array([-0.9..., -7.1...])
>>> regr.score(X_test, y_test)
0.4...
Notes
-----
MLPRegressor trains iteratively since at each time step
the partial derivatives of the loss function with respect to the model
parameters are computed to update the parameters.
It can also have a regularization term added to the loss function
that shrinks model parameters to prevent overfitting.
    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values.
References
----------
Hinton, Geoffrey E.
"Connectionist learning procedures." Artificial intelligence 40.1
(1989): 185-234.
Glorot, Xavier, and Yoshua Bengio. "Understanding the difficulty of
training deep feedforward neural networks." International Conference
on Artificial Intelligence and Statistics. 2010.
He, Kaiming, et al. "Delving deep into rectifiers: Surpassing human-level
performance on imagenet classification." arXiv preprint
arXiv:1502.01852 (2015).
Kingma, Diederik, and Jimmy Ba. "Adam: A method for stochastic
optimization." arXiv preprint arXiv:1412.6980 (2014).
"""
@_deprecate_positional_args
def __init__(self, hidden_layer_sizes=(100,), activation="relu", *,
solver='adam', alpha=0.0001,
batch_size='auto', learning_rate="constant",
learning_rate_init=0.001,
power_t=0.5, max_iter=200, shuffle=True,
random_state=None, tol=1e-4,
verbose=False, warm_start=False, momentum=0.9,
nesterovs_momentum=True, early_stopping=False,
validation_fraction=0.1, beta_1=0.9, beta_2=0.999,
epsilon=1e-8, n_iter_no_change=10, max_fun=15000):
super().__init__(
hidden_layer_sizes=hidden_layer_sizes,
activation=activation, solver=solver, alpha=alpha,
batch_size=batch_size, learning_rate=learning_rate,
learning_rate_init=learning_rate_init, power_t=power_t,
max_iter=max_iter, loss='squared_loss', shuffle=shuffle,
random_state=random_state, tol=tol, verbose=verbose,
warm_start=warm_start, momentum=momentum,
nesterovs_momentum=nesterovs_momentum,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
beta_1=beta_1, beta_2=beta_2, epsilon=epsilon,
n_iter_no_change=n_iter_no_change, max_fun=max_fun)
def predict(self, X):
"""Predict using the multi-layer perceptron model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
Returns
-------
        y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
y_pred = self._forward_pass_fast(X)
if y_pred.shape[1] == 1:
return y_pred.ravel()
return y_pred
def _validate_input(self, X, y, incremental, reset):
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'],
multi_output=True, y_numeric=True,
dtype=(np.float64, np.float32),
reset=reset)
if y.ndim == 2 and y.shape[1] == 1:
y = column_or_1d(y, warn=True)
return X, y
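# Usage sketch (an added example, not part of scikit-learn): fitting the
# regressor with the default 'adam' solver and early stopping. The toy data
# and hyperparameters below are made up for illustration.
def _mlp_regressor_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = X[:, 0] - 2.0 * X[:, 1] + 0.1 * rng.randn(200)
    reg = MLPRegressor(hidden_layer_sizes=(32,), early_stopping=True,
                       max_iter=500, random_state=0)
    reg.fit(X, y)
    return reg.score(X, y)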
| bsd-3-clause |
LoLab-VU/pymc | pymc/diagnostics.py | 3 | 5373 | """Convergence diagnostics and model validation"""
import numpy as np
from .stats import autocorr, autocov, statfunc
from copy import copy
__all__ = ['geweke', 'gelman_rubin', 'trace_to_dataframe']
@statfunc
def geweke(x, first=.1, last=.5, intervals=20):
"""Return z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of
series. x is divided into a number of segments for which this difference is
computed. If the series is converged, this score should oscillate between
-1 and 1.
Parameters
----------
x : array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
Returns
-------
scores : list [[]]
Return a list of [i, score], where i is the starting index for each
interval and score the Geweke score on the interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
Geweke (1992)
"""
    if np.ndim(x) > 1:
return [geweke(y, first, last, intervals) for y in np.transpose(x)]
# Filter out invalid intervals
if first + last >= 1:
raise ValueError(
"Invalid intervals for Geweke convergence analysis",
(first,
last))
# Initialize list of z-scores
zscores = []
# Last index value
end = len(x) - 1
# Calculate starting indices
sindices = np.arange(0, end // 2, step=int((end / 2) / (intervals - 1)))
# Loop over start indices
for start in sindices:
# Calculate slices
first_slice = x[start: start + int(first * (end - start))]
last_slice = x[int(end - last * (end - start)):]
z = (first_slice.mean() - last_slice.mean())
z /= np.sqrt(first_slice.std() ** 2 + last_slice.std() ** 2)
zscores.append([start, z])
if intervals is None:
return np.array(zscores[0])
else:
return np.array(zscores)
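# Illustration (an added sketch, not part of PyMC): the Geweke z-score for a
# single pair of segments, written out with plain numpy so the formula in the
# docstring is easy to check against the loop above. The chain is synthetic.
def _geweke_single_score_demo():
    chain = np.random.RandomState(42).standard_normal(1000)
    first_slice = chain[:100]          # first 10% of the chain
    last_slice = chain[-500:]          # last 50% of the chain
    z = first_slice.mean() - last_slice.mean()
    z /= np.sqrt(first_slice.std() ** 2 + last_slice.std() ** 2)
    return z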
def gelman_rubin(mtrace):
""" Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
mtrace : MultiTrace
A MultiTrace object containing parallel traces (minimum 2)
of one or more stochastic parameters.
Returns
-------
Rhat : dict
Returns dictionary of the potential scale reduction factors, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \frac{\hat{V}}{W}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992)"""
if mtrace.nchains < 2:
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
def calc_rhat(x):
try:
# When the variable is multidimensional, this assignment will fail, triggering
# a ValueError that will handle the multidimensional case
m, n = x.shape
# Calculate between-chain variance
B = n * np.var(np.mean(x, axis=1), ddof=1)
# Calculate within-chain variance
W = np.mean(np.var(x, axis=1, ddof=1))
# Estimate of marginal posterior variance
Vhat = W*(n - 1)/n + B/n
return np.sqrt(Vhat/W)
except ValueError:
# Tricky transpose here, shifting the last dimension to the first
rotated_indices = np.roll(np.arange(x.ndim), 1)
# Now iterate over the dimension of the variable
return np.squeeze([calc_rhat(xi) for xi in x.transpose(rotated_indices)])
Rhat = {}
for var in mtrace.varnames:
# Get all traces for var
x = np.array(mtrace.get_values(var))
try:
Rhat[var] = calc_rhat(x)
except ValueError:
Rhat[var] = [calc_rhat(y.transpose()) for y in x.transpose()]
return Rhat
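# Illustration (an added sketch, not part of PyMC): the R-hat formula applied
# directly to an (m, n) array of synthetic chains, mirroring calc_rhat in the
# one-dimensional case. Values close to 1 indicate the chains agree.
def _rhat_demo():
    rng = np.random.RandomState(0)
    x = rng.standard_normal((2, 1000))            # m=2 chains, n=1000 draws
    m, n = x.shape
    B = n * np.var(np.mean(x, axis=1), ddof=1)    # between-chain variance
    W = np.mean(np.var(x, axis=1, ddof=1))        # within-chain variance
    Vhat = W * (n - 1) / n + B / n                # pooled variance estimate
    return np.sqrt(Vhat / W)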
def trace_to_dataframe(trace):
"""Convert a PyMC trace consisting of 1-D variables to a pandas DataFrame
"""
import pandas as pd
return pd.DataFrame(
{varname: np.squeeze(trace.get_values(varname, combine=True))
for varname in trace.varnames})
| apache-2.0 |
sid88in/incubator-airflow | airflow/hooks/druid_hook.py | 6 | 6122 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import requests
import time
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class DruidHook(BaseHook):
"""
Connection to Druid overlord for ingestion
:param druid_ingest_conn_id: The connection id to the Druid overlord machine
which accepts index jobs
:type druid_ingest_conn_id: str
:param timeout: The interval between polling
the Druid job for the status of the ingestion job.
Must be greater than or equal to 1
:type timeout: int
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
:type max_ingestion_time: int
"""
def __init__(
self,
druid_ingest_conn_id='druid_ingest_default',
timeout=1,
max_ingestion_time=None):
self.druid_ingest_conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
self.header = {'content-type': 'application/json'}
if self.timeout < 1:
raise ValueError("Druid timeout should be equal or greater than 1")
def get_conn_url(self):
conn = self.get_connection(self.druid_ingest_conn_id)
host = conn.host
port = conn.port
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', '')
return "{conn_type}://{host}:{port}/{endpoint}".format(**locals())
def submit_indexing_job(self, json_index_spec):
url = self.get_conn_url()
self.log.info("Druid ingestion spec: {}".format(json_index_spec))
req_index = requests.post(url, json=json_index_spec, headers=self.header)
if req_index.status_code != 200:
raise AirflowException('Did not get 200 when '
'submitting the Druid job to {}'.format(url))
req_json = req_index.json()
# Wait until the job is completed
druid_task_id = req_json['task']
self.log.info("Druid indexing task-id: {}".format(druid_task_id))
running = True
sec = 0
while running:
req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
self.log.info("Job still running for %s seconds...", sec)
if self.max_ingestion_time and sec > self.max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
                raise AirflowException('Druid ingestion took more than '
                                       '%s seconds' % self.max_ingestion_time)
time.sleep(self.timeout)
sec = sec + self.timeout
status = req_status.json()['status']['status']
if status == 'RUNNING':
running = True
elif status == 'SUCCESS':
running = False # Great success!
elif status == 'FAILED':
raise AirflowException('Druid indexing job failed, '
'check console for more info')
else:
                raise AirflowException(
                    'Could not get status of the job, got %s' % status)
self.log.info('Successful index')
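# Usage sketch (an added example, not part of Airflow): submitting an
# ingestion spec through the hook. The connection id, timings and the spec
# below are placeholders; a real spec follows the Druid ingestion docs.
def _druid_ingest_usage_sketch():
    hook = DruidHook(druid_ingest_conn_id='druid_ingest_default',
                     timeout=10, max_ingestion_time=3600)
    index_spec = {'type': 'index_parallel', 'spec': {}}    # placeholder spec
    hook.submit_indexing_job(index_spec)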
class DruidDbApiHook(DbApiHook):
"""
Interact with Druid broker
This hook is purely for users to query druid broker.
For ingestion, please use druidHook.
"""
conn_name_attr = 'druid_broker_conn_id'
default_conn_name = 'druid_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super(DruidDbApiHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Establish a connection to druid broker.
"""
conn = self.get_connection(self.druid_broker_conn_id)
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to druid '
'broker on {host}'.format(host=conn.host))
return druid_broker_conn
def get_uri(self):
"""
Get the connection uri for druid broker.
e.g: druid://localhost:8082/druid/v2/sql/
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'druid' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'druid/v2/sql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
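# Usage sketch (an added example, not part of Airflow): querying the broker
# through the DB-API connection returned by get_conn(). The connection id,
# datasource name and SQL below are placeholders.
def _druid_broker_query_sketch():
    hook = DruidDbApiHook(druid_broker_conn_id='druid_broker_default')
    cursor = hook.get_conn().cursor()
    cursor.execute('SELECT COUNT(*) FROM my_datasource')   # placeholder SQL
    return cursor.fetchone()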
| apache-2.0 |
amolkahat/pandas | pandas/tests/io/json/test_compression.py | 3 | 4389 | import pytest
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path,
compression=compression))
# explicitly ensure file was compressed.
with tm.decompress_file(path, compression) as fh:
result = fh.read().decode('utf8')
assert_frame_equal(df, pd.read_json(result))
def test_read_zipped_json(datapath):
uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")
uncompressed_df = pd.read_json(uncompressed_path)
compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")
compressed_df = pd.read_json(compressed_path, compression='zip')
assert_frame_equal(uncompressed_df, compressed_df)
@td.skip_if_not_us_locale
def test_with_s3_url(compression):
boto3 = pytest.importorskip('boto3')
pytest.importorskip('s3fs')
moto = pytest.importorskip('moto')
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
bucket = conn.create_bucket(Bucket="pandas-test")
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
with open(path, 'rb') as f:
bucket.put_object(Key='test-1', Body=f)
roundtripped_df = pd.read_json('s3://pandas-test/test-1',
compression=compression)
assert_frame_equal(df, roundtripped_df)
def test_lines_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True,
compression=compression)
roundtripped_df = pd.read_json(path, lines=True,
compression=compression)
assert_frame_equal(df, roundtripped_df)
def test_chunksize_with_compression(compression):
with tm.ensure_clean() as path:
df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
df.to_json(path, orient='records', lines=True,
compression=compression)
res = pd.read_json(path, lines=True, chunksize=1,
compression=compression)
roundtripped_df = pd.concat(res)
assert_frame_equal(df, roundtripped_df)
def test_write_unsupported_compression_type():
df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}')
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, df.to_json,
path, compression="unsupported")
def test_read_unsupported_compression_type():
with tm.ensure_clean() as path:
msg = "Unrecognized compression type: unsupported"
assert_raises_regex(ValueError, msg, pd.read_json,
path, compression="unsupported")
@pytest.mark.parametrize("to_infer", [True, False])
@pytest.mark.parametrize("read_infer", [True, False])
def test_to_json_compression(compression_only,
read_infer, to_infer):
# see gh-15008
compression = compression_only
if compression == "zip":
pytest.skip("{compression} is not supported "
"for to_csv".format(compression=compression))
# We'll complete file extension subsequently.
filename = "test."
if compression == "gzip":
filename += "gz"
else:
# xz --> .xz
# bz2 --> .bz2
filename += compression
df = pd.DataFrame({"A": [1]})
to_compression = "infer" if to_infer else compression
read_compression = "infer" if read_infer else compression
with tm.ensure_clean(filename) as path:
df.to_json(path, compression=to_compression)
result = pd.read_json(path, compression=read_compression)
tm.assert_frame_equal(result, df)
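# Illustration (an added helper, not part of the pandas test suite): the
# round trip the tests above exercise, written as a plain snippet. The file
# name is hypothetical; the '.gz' suffix also lets compression='infer' work.
def _roundtrip_gzip_example():
    df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
    df.to_json('frame.json.gz', compression='gzip')
    return pd.read_json('frame.json.gz', compression='gzip')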
| bsd-3-clause |
nelson-liu/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
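# Illustration (an added sketch, not part of the test suite): how the toy data
# above is typically consumed. With three planted rectangular atoms, the
# sparsity pattern of components_ should roughly recover them.
def _sparse_pca_toy_demo():
    Y, _, _ = generate_toy_data(3, 30, (8, 8), random_state=0)
    model = SparsePCA(n_components=3, alpha=1, random_state=0)
    codes = model.fit_transform(Y)
    return codes.shape, model.components_.shape    # (30, 3) and (3, 64)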
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
    # Test that SparsePCA won't return NaN when a feature is 0 in all
    # samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
mhue/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares solution, the
coefficients exhibit large oscillations.
"""
# Author: Fabian Pedregosa -- <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
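# (entries H[i, j] = 1 / (i + j + 1); the broadcast sum of the two aranges below
#  builds exactly that denominator)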
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rjenc29/numerical | tests/test_ewma.py | 1 | 1275 | import pandas as pd
import numpy as np
from utilities.ewma import ewma
def sample_data_series():
data = np.arange(50).astype(float)
data[3] = np.nan
data[4] = np.nan
return pd.Series(data)
def test_ewma_adjust_and_ignore_na():
series = sample_data_series()
alpha = 0.1
expected = series.ewm(alpha=alpha, adjust=True, ignore_na=True).mean()
output = ewma(series.values, alpha, True, True)
assert np.allclose(expected, output)
def test_ewma_adjust():
series = sample_data_series()
alpha = 0.1
expected = series.ewm(alpha=alpha, adjust=True, ignore_na=False).mean()
output = ewma(series.values, alpha, True, False)
assert np.allclose(expected, output)
def test_ewma_ignore_na():
series = sample_data_series()
alpha = 0.1
expected = series.ewm(alpha=alpha, adjust=False, ignore_na=True).mean()
output = ewma(series.values, alpha, False, True)
assert np.allclose(expected, output)
def test_ewma():
series = sample_data_series()
alpha = 0.1
expected = series.ewm(alpha=alpha, adjust=False, ignore_na=False).mean()
output = ewma(series.values, alpha, False, False)
assert np.allclose(expected, output)
if __name__ == '__main__':
import pytest
pytest.main()
| mit |
kirbs-/edX-Learning-From-Data-Solutions | Final/Python/by_Mark_B2/hw8.py | 3 | 5383 | '''
Created on
@author: Mark
'''
import numpy as np
import sklearn.svm as svm
from hw6 import readIn
import random
from sklearn.cross_validation import KFold
def getData():
return readIn('features.train'), readIn('features.test')
def classificationError(Y, Y_in):
return sum(Y != Y_in) / float(len(Y))
def prepareOneVsAll(data, class1):
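    # features are columns 1:, labels are +1 for class1 and -1 for every other digit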
return data[:,1:], (data[:,0] == class1)*2. - 1.
def prepareOneVsOne(data, class1, class2):
sel = np.logical_or(data[:,0] == class1, data[:,0] == class2)
return data[sel,1:], (data[sel,0] == class1)*2. - 1.
def polySVC(X, Y, C=.01, Q=2, coef0=1, verbose=False):
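    # soft-margin SVC with polynomial kernel (gamma * <x, x'> + coef0) ** Q,
    # i.e. (<x, x'> + 1) ** Q here; returns the in-sample classification error,
    # the number of support vectors and the fitted classifier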
clf = svm.SVC(kernel='poly', C=C, degree=Q, coef0=coef0, gamma=1., verbose=verbose)
clf.fit(X, Y)
return classificationError(Y, clf.predict(X)), len(clf.support_vectors_), clf
def problem2_4():
train, test = getData()
for class1 in (1, 3, 5, 7, 9):
# for class1 in (0, 2, 4, 6, 8):
res = polySVC(*prepareOneVsAll(train, class1))
print '{} vs ALL E_in {:.4f} #SV {}' .format(class1, res[0], res[1])
# print len(res[2].dual_coef_[0]), res[2].dual_coef_
# problem2_4()
def problem5_6():
train, test = getData()
class1 = 1
class2 = 5
X, Y = prepareOneVsOne(train, class1, class2)
X_test, Y_test = prepareOneVsOne(test, class1, class2)
for C in (.0001, .001, .01, 1):
for Q in (2, 5):
E_in, SV, clf = polySVC(X, Y, C=C, Q=Q)
E_out = classificationError(Y_test, clf.predict(X_test))
print 'C={:5} Q={} E_in {} E_out {} #SV {}' .format(C, Q, E_in, E_out, SV)
# problem5_6()
def problem7_8():
train, test_out = getData()
class1 = 1
class2 = 5
options = [.0001, .001, .01, .1, 1.]
folds = 10
X, Y = prepareOneVsOne(train, class1, class2)
numberExperiments = 100
C_choosen = np.zeros(len(options))
E_cv_mean = np.zeros_like(C_choosen)
for experiment in range(numberExperiments):
kf = KFold(len(Y), folds, indices=False, shuffle=True)
E_cv = np.zeros_like(E_cv_mean)
for train, test in kf:
X_t = X[train,:]
Y_t = Y[train]
X_v = X[test,:]
Y_v = Y[test]
for i, C in enumerate(options):
clf = polySVC(X_t, Y_t, C=C)[2]
E_cv[i] += classificationError(Y_v, clf.predict(X_v))
C_choosen[np.argmin(E_cv)] += 1
E_cv_mean += E_cv/folds
print C_choosen, E_cv_mean/numberExperiments
C = options[np.argmax(C_choosen)]
X_v, Y_v = prepareOneVsOne(test_out, class1, class2)
print 'C', C, 'E_out', classificationError(Y_v, polySVC(X, Y, C=C)[2].predict(X_v))
# problem7_8()
def plotGraph(X, Y, clf, func=None):
import matplotlib.pyplot as plt
xx1 = np.linspace(-1., 1., 100)
xx2 = np.linspace(-1., 1., 100)
S = np.array([[x1, x2] for x2 in xx2 for x1 in xx1])
if func:
Y_s = func.apply(np.insert(S, 0, 1, 1))
Y = clf.predict(X)
else:
Y_s = clf.predict(S)
color = np.empty_like(Y_s, dtype=np.str_)
color[:] = 'white'
color[ Y_s > 0 ] = 'yellow'
pcolor = np.empty_like(Y, dtype=np.str_)
pcolor[:] = 'green'
pcolor[ Y > 0 ] = 'red'
plt.scatter(S[:,0], S[:,1], c=color, alpha=.3) #, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, hold)
plt.scatter(X[:,0], X[:,1], c=pcolor) #, marker, cmap, norm, vmin, vmax, alpha, linewidths, verts, hold)
plt.axis('tight')
plt.show()
def problem9_10():
train, test_out = getData()
class1 = 1
class2 = 5
X, Y = prepareOneVsOne(train, class1, class2)
X_v, Y_v = prepareOneVsOne(test_out, class1, class2)
for C in ( .01, 1., 100., 1e4, 1e6):
clf = svm.SVC(kernel='rbf', C=C, gamma=1., verbose=False)
clf.fit(X, Y)
# plotGraph(X, Y, clf)
print 'C={:9} E_in {}\t E_out {}'.format(C,
classificationError(Y, clf.predict(X)), classificationError(Y_v, clf.predict(X_v)))
# problem9_10()
def problemXX():
train, test_out = getData()
X, Y = train[:,1:], train[:,0]
X_v, Y_v = test_out[:,1:], test_out[:,0]
for C in (100.,):# .01, 1., 100., 1e4, 1e6):
clf = svm.SVC(kernel='rbf', C=C, gamma=10., verbose=True)
clf.fit(X, Y)
plotGraph(X, Y, clf)
print 'C={:9} E_in {}\t E_out {}'.format(C,
classificationError(Y, clf.predict(X)), classificationError(Y_v, clf.predict(X_v)))
# problemXX()
def cv():
# train, test = getData()
# class1 = 1
# class2 = 5
# X, Y = prepareOneVsOne(train, class1, class2)
# YY = 10*np.arange(10)
from sklearn.cross_validation import KFold
for i in range(10):
# prm = np.random.permutation(10)
# print a
# Y = YY[a]
# print YY
# print Y
print
# kf = KFold(len(Y), 3, indices=False, shuffle=True)
# for train, test in kf:
# print Y[train]
# print Y[test]
# break
# print kf
# first = np.array([[train, test] for train, test in kf])
# kf = KFold(len(Y), 3, indices=False, shuffle=True)
# for train, test in kf:
# print Y[train]
# print Y[test]
# break
# second = np.array([[train,test] for train, test in kf])
# print np.all(np.equal(first, second))
# cv()
def writeCSV():
p = getData()[0]
with open('out.csv', 'w') as f:
for s in p:
# str = ''
for ss in s:
f.write(str(ss)+',')
f.write('\n')
print p
# writeCSV()
| apache-2.0 |
sergiohzlz/complejos | Logistica/logisticmap.py | 1 | 1255 | #!/usr/bin/python
#-*-coding:utf8-*-
import sys
import matplotlib
matplotlib.use('TkAgg')  # select the backend before pyplot/pylab are imported
from pylab import *
from matplotlib import pyplot as plt
plt.style.use('ggplot')
def logistica(x0,r, n=1000):
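    # iterate the logistic map x_{n+1} = r * x_n * (1 - x_n) for n steps and
    # return the whole orbit starting from x0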
L = [x0]
for i in range(n):
x_act = L[-1]
L.append(r*x_act*(1-x_act))
return L
def bifurcaciones(x0,rango,step,n,k):
print("x0 ",x0)
print("rango ",rango[0]," - ",rango[1])
R = arange(rango[0],rango[1],step)
m =(rango[1]-rango[0])/step
m *= (n-k)
m += (n-k)
S = zeros((int(m),2))
print(S.shape)
i,j=0,0
for r in R:
#print "r {0}".format(r)
L = logistica(x0,r,n)
for p in L[k+1:]:
S[i]=(r,p)
i += 1
return S
if __name__=='__main__':
figura = sys.argv[1]
inicio = float(sys.argv[2])
fin = float(sys.argv[3])
x0 = float(sys.argv[4])
rango = [inicio, fin]
iters = 500
#x0 = 0.2
S = bifurcaciones(x0, rango,0.001,iters,200)
f = open('salida.dat','w')
for s in S:
f.write(str(s[0]) +'\t' + str(s[1]) + '\n')
f.close()
fig = figure()
title('Bifurcaciones para el rango '+str(rango))
xlim(tuple(rango))
scatter(S[:,0],S[:,1], s=0.01, c='blue')
savefig(figura+'.png')
| gpl-2.0 |
kmspriyatham/symath | scipy/scipy/interpolate/tests/test_rbf.py | 4 | 4162 | #!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (1D)"""
olderr = np.seterr(all="ignore")
try:
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
finally:
np.seterr(**olderr)
def check_rbf2d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (2D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
finally:
np.seterr(**olderr)
def check_rbf3d_interpolation(function):
"""Check that the Rbf function interpolates throught the nodes (3D)"""
olderr = np.seterr(all="ignore")
try:
x = random.rand(50,1)*4-2
y = random.rand(50,1)*4-2
z = random.rand(50,1)*4-2
d = x*exp(-x**2-y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
finally:
np.seterr(**olderr)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
"""Check that the Rbf function approximates a smooth function well away
from the nodes."""
olderr = np.seterr(all="ignore")
try:
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
#import matplotlib.pyplot as plt
#plt.figure()
#plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
#plt.title(function)
#plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
finally:
np.seterr(**olderr)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.05,
'inverse multiquadric': 0.02,
'gaussian': 0.01,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def test_default_construction():
"""Check that the Rbf class can be constructed with the default
multiquadric basis function. Regression test for ticket #1228."""
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
"""Check that the Rbf class can be constructed with function=callable."""
x = linspace(0,10,9)
y = sin(x)
linfunc = lambda x:x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
"""Check that the Rbf class can be constructed with a two argument
function=callable."""
def _func(self, r):
return self.epsilon + r
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
| apache-2.0 |
frank-tancf/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 408 | 8061 | import re
import inspect
import textwrap
import pydoc
from .docscrape import NumpyDocString
from .docscrape import FunctionDoc
from .docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config=None):
config = {} if config is None else config
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
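        # The unconditional return below disables signature rendering; the
        # branch that follows it is unreachable and kept only for reference.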
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
# GAEL: Toctree commented out below because it creates
# hundreds of sphinx warnings
# out += ['.. autosummary::', ' :toctree:', '']
out += ['.. autosummary::', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
import sphinx # local import to avoid test dependency
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Methods',):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
dpshelio/scikit-image | skimage/viewer/tests/test_tools.py | 19 | 5681 | from collections import namedtuple
import numpy as np
from numpy.testing import assert_equal
from numpy.testing.decorators import skipif
from skimage import data
from skimage.viewer import ImageViewer, has_qt
from skimage.viewer.canvastools import (
LineTool, ThickLineTool, RectangleTool, PaintTool)
from skimage.viewer.canvastools.base import CanvasToolBase
try:
from matplotlib.testing.decorators import cleanup
except ImportError:
def cleanup(func):
return func
def get_end_points(image):
h, w = image.shape[0:2]
x = [w / 3, 2 * w / 3]
y = [h / 2] * 2
return np.transpose([x, y])
def do_event(viewer, etype, button=1, xdata=0, ydata=0, key=None):
"""
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
"""
ax = viewer.ax
event = namedtuple('Event',
('name canvas guiEvent x y inaxes xdata ydata '
'button key step'))
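    # Note: namedtuple() returns the Event *class*; the attributes below are set
    # directly on that class object, which is enough to emulate a matplotlib
    # event for these tests.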
event.button = button
event.x, event.y = ax.transData.transform((xdata, ydata))
event.xdata, event.ydata = xdata, ydata
event.inaxes = ax
event.canvas = ax.figure.canvas
event.key = key
event.step = 1
event.guiEvent = None
event.name = 'Custom'
func = getattr(viewer._event_manager, 'on_%s' % etype)
func(event)
@cleanup
@skipif(not has_qt)
def test_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = LineTool(viewer, maxdist=10, line_props=dict(linewidth=3),
handle_props=dict(markersize=5))
tool.end_points = get_end_points(img)
assert_equal(tool.end_points, np.array([[170, 256], [341, 256]]))
# grab a handle and move it
do_event(viewer, 'mouse_press', xdata=170, ydata=256)
do_event(viewer, 'move', xdata=180, ydata=260)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, np.array([[180, 260], [341, 256]]))
# create a new line
do_event(viewer, 'mouse_press', xdata=10, ydata=10)
do_event(viewer, 'move', xdata=100, ydata=100)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, np.array([[100, 100], [10, 10]]))
@cleanup
@skipif(not has_qt)
def test_thick_line_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = ThickLineTool(viewer, maxdist=10, line_props=dict(color='red'),
handle_props=dict(markersize=5))
tool.end_points = get_end_points(img)
do_event(viewer, 'scroll', button='up')
assert_equal(tool.linewidth, 2)
do_event(viewer, 'scroll', button='down')
assert_equal(tool.linewidth, 1)
do_event(viewer, 'key_press', key='+')
assert_equal(tool.linewidth, 2)
do_event(viewer, 'key_press', key='-')
assert_equal(tool.linewidth, 1)
@cleanup
@skipif(not has_qt)
def test_rect_tool():
img = data.camera()
viewer = ImageViewer(img)
tool = RectangleTool(viewer, maxdist=10)
tool.extents = (100, 150, 100, 150)
assert_equal(tool.corners,
((100, 150, 150, 100), (100, 100, 150, 150)))
assert_equal(tool.extents, (100, 150, 100, 150))
assert_equal(tool.edge_centers,
((100, 125.0, 150, 125.0), (125.0, 100, 125.0, 150)))
assert_equal(tool.geometry, (100, 150, 100, 150))
# grab a corner and move it
do_event(viewer, 'mouse_press', xdata=100, ydata=100)
do_event(viewer, 'move', xdata=120, ydata=120)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, [120, 150, 120, 150])
# create a new line
do_event(viewer, 'mouse_press', xdata=10, ydata=10)
do_event(viewer, 'move', xdata=100, ydata=100)
do_event(viewer, 'mouse_release')
assert_equal(tool.geometry, [10, 100, 10, 100])
@cleanup
@skipif(not has_qt)
def test_paint_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = PaintTool(viewer, img.shape)
tool.radius = 10
assert_equal(tool.radius, 10)
tool.label = 2
assert_equal(tool.label, 2)
assert_equal(tool.shape, img.shape)
do_event(viewer, 'mouse_press', xdata=100, ydata=100)
do_event(viewer, 'move', xdata=110, ydata=110)
do_event(viewer, 'mouse_release')
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
tool.label = 5
do_event(viewer, 'mouse_press', xdata=20, ydata=20)
do_event(viewer, 'move', xdata=40, ydata=40)
do_event(viewer, 'mouse_release')
assert_equal(tool.overlay[tool.overlay == 5].size, 881)
assert_equal(tool.overlay[tool.overlay == 2].size, 761)
do_event(viewer, 'key_press', key='enter')
tool.overlay = tool.overlay * 0
assert_equal(tool.overlay.sum(), 0)
@cleanup
@skipif(not has_qt)
def test_base_tool():
img = data.moon()
viewer = ImageViewer(img)
tool = CanvasToolBase(viewer)
tool.set_visible(False)
tool.set_visible(True)
do_event(viewer, 'key_press', key='enter')
tool.redraw()
tool.remove()
tool = CanvasToolBase(viewer, useblit=False)
tool.redraw()
| bsd-3-clause |
florian-f/sklearn | examples/plot_johnson_lindenstrauss_bound.py | 4 | 7402 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is controlled by
the fact that `p` defines an eps-embedding with good probability,
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
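For instance, plugging ``n_samples = 500`` and ``eps = 0.1`` into this bound
gives ``n_components >= 4 * log(500) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3)``, that is
roughly 5300 dimensions; this is the quantity computed below by
``johnson_lindenstrauss_min_dim``.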
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that an increase of the admissible
distortion ``eps`` allows one to drastically reduce the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, the 8x8 gray level pixel data for 500
  handwritten digit pictures are randomly projected to spaces of various
  larger numbers of dimensions ``n_components``.
- for the 20 newsgroups dataset, some 500 documents with 100k
  features in total are projected using a sparse random matrix to smaller
  Euclidean spaces with various values for the target number of dimensions
  ``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left, as distances are always positive),
while for larger values of ``n_components`` the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset, which only has 64 features
in the input space, does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be
decreased from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
import sys
from time import time
import numpy as np
import pylab as pl
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
pl.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
pl.loglog(n_samples_range, min_n_components, color=color)
pl.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
pl.xlabel("Number of observations to eps-embed")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
pl.show()
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = pl.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
pl.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
pl.semilogy(eps_range, min_n_components, color=color)
pl.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
pl.xlabel("Distortion eps")
pl.ylabel("Minimum number of dimensions")
pl.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
pl.show()
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
pl.figure()
pl.hexbin(dists, projected_dists, gridsize=100)
pl.xlabel("Pairwise squared distances in original space")
pl.ylabel("Pairwise squared distances in projected space")
pl.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = pl.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
pl.figure()
pl.hist(rates, bins=50, normed=True, range=(0., 2.))
pl.xlabel("Squared distances rate: projected / original")
pl.ylabel("Distribution of samples pairs")
pl.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
pl.show()
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
| bsd-3-clause |
WindCanDie/spark | python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py | 7 | 18847 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from pyspark.rdd import PythonEvalType
from pyspark.sql.functions import array, explode, col, lit, mean, sum, \
udf, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.sql.utils import AnalysisException
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedAggPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
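        # 10 ids; each id appears with the 10 values v = id + 20.0, ..., id + 29.0
        # and a constant weight column w = 1.0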
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i * 1.0) + col('id') for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))) \
.drop('vs') \
.withColumn('w', lit(1.0))
@property
def python_plus_one(self):
@udf('double')
def plus_one(v):
assert isinstance(v, (int, float))
return v + 1
return plus_one
@property
def pandas_scalar_plus_two(self):
import pandas as pd
@pandas_udf('double', PandasUDFType.SCALAR)
def plus_two(v):
assert isinstance(v, pd.Series)
return v + 2
return plus_two
@property
def pandas_agg_mean_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def avg(v):
return v.mean()
return avg
@property
def pandas_agg_sum_udf(self):
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def sum(v):
return v.sum()
return sum
@property
def pandas_agg_weighted_mean_udf(self):
import numpy as np
@pandas_udf('double', PandasUDFType.GROUPED_AGG)
def weighted_mean(v, w):
return np.average(v, weights=w)
return weighted_mean
def test_manual(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
mean_udf = self.pandas_agg_mean_udf
mean_arr_udf = pandas_udf(
self.pandas_agg_mean_udf.func,
ArrayType(self.pandas_agg_mean_udf.returnType),
self.pandas_agg_mean_udf.evalType)
result1 = df.groupby('id').agg(
sum_udf(df.v),
mean_udf(df.v),
mean_arr_udf(array(df.v))).sort('id')
expected1 = self.spark.createDataFrame(
[[0, 245.0, 24.5, [24.5]],
[1, 255.0, 25.5, [25.5]],
[2, 265.0, 26.5, [26.5]],
[3, 275.0, 27.5, [27.5]],
[4, 285.0, 28.5, [28.5]],
[5, 295.0, 29.5, [29.5]],
[6, 305.0, 30.5, [30.5]],
[7, 315.0, 31.5, [31.5]],
[8, 325.0, 32.5, [32.5]],
[9, 335.0, 33.5, [33.5]]],
['id', 'sum(v)', 'avg(v)', 'avg(array(v))'])
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_basic(self):
df = self.data
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
# Groupby one column and aggregate one UDF with literal
result1 = df.groupby('id').agg(weighted_mean_udf(df.v, lit(1.0))).sort('id')
expected1 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
# Groupby one expression and aggregate one UDF with literal
result2 = df.groupby((col('id') + 1)).agg(weighted_mean_udf(df.v, lit(1.0)))\
.sort(df.id + 1)
expected2 = df.groupby((col('id') + 1))\
.agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort(df.id + 1)
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
# Groupby one column and aggregate one UDF without literal
result3 = df.groupby('id').agg(weighted_mean_udf(df.v, df.w)).sort('id')
expected3 = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, w)')).sort('id')
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
# Groupby one expression and aggregate one UDF without literal
result4 = df.groupby((col('id') + 1).alias('id'))\
.agg(weighted_mean_udf(df.v, df.w))\
.sort('id')
expected4 = df.groupby((col('id') + 1).alias('id'))\
.agg(mean(df.v).alias('weighted_mean(v, w)'))\
.sort('id')
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
def test_unsupported_types(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
pandas_udf(
lambda x: x,
ArrayType(ArrayType(TimestampType())),
PandasUDFType.GROUPED_AGG)
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf('mean double, std double', PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return v.mean(), v.std()
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, 'not supported'):
@pandas_udf(MapType(DoubleType(), DoubleType()), PandasUDFType.GROUPED_AGG)
def mean_and_std_udf(v):
return {v.mean(): v.std()}
def test_alias(self):
df = self.data
mean_udf = self.pandas_agg_mean_udf
result1 = df.groupby('id').agg(mean_udf(df.v).alias('mean_alias'))
expected1 = df.groupby('id').agg(mean(df.v).alias('mean_alias'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_mixed_sql(self):
"""
Test mixing group aggregate pandas UDF with sql expression.
"""
df = self.data
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF with sql expression
result1 = (df.groupby('id')
.agg(sum_udf(df.v) + 1)
.sort('id'))
expected1 = (df.groupby('id')
.agg(sum(df.v) + 1)
.sort('id'))
# Mix group aggregate pandas UDF with sql expression (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(df.v + 1))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(df.v + 1))
.sort('id'))
# Wrap group aggregate pandas UDF with two sql expressions
result3 = (df.groupby('id')
.agg(sum_udf(df.v + 1) + 2)
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(df.v + 1) + 2)
.sort('id'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
def test_mixed_udfs(self):
"""
Test mixing group aggregate pandas UDF with python UDF and scalar pandas UDF.
"""
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Mix group aggregate pandas UDF and python UDF
result1 = (df.groupby('id')
.agg(plus_one(sum_udf(df.v)))
.sort('id'))
expected1 = (df.groupby('id')
.agg(plus_one(sum(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and python UDF (order swapped)
result2 = (df.groupby('id')
.agg(sum_udf(plus_one(df.v)))
.sort('id'))
expected2 = (df.groupby('id')
.agg(sum(plus_one(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF
result3 = (df.groupby('id')
.agg(sum_udf(plus_two(df.v)))
.sort('id'))
expected3 = (df.groupby('id')
.agg(sum(plus_two(df.v)))
.sort('id'))
# Mix group aggregate pandas UDF and scalar pandas UDF (order swapped)
result4 = (df.groupby('id')
.agg(plus_two(sum_udf(df.v)))
.sort('id'))
expected4 = (df.groupby('id')
.agg(plus_two(sum(df.v)))
.sort('id'))
# Wrap group aggregate pandas UDF with two python UDFs and use python UDF in groupby
result5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum_udf(plus_one(df.v))))
.sort('plus_one(id)'))
expected5 = (df.groupby(plus_one(df.id))
.agg(plus_one(sum(plus_one(df.v))))
.sort('plus_one(id)'))
        # Wrap group aggregate pandas UDF with two scalar pandas UDFs and use scalar pandas UDF in
        # groupby
result6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum_udf(plus_two(df.v))))
.sort('plus_two(id)'))
expected6 = (df.groupby(plus_two(df.id))
.agg(plus_two(sum(plus_two(df.v))))
.sort('plus_two(id)'))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
def test_multiple_udfs(self):
"""
Test multiple group aggregate pandas UDFs in one agg function.
"""
df = self.data
mean_udf = self.pandas_agg_mean_udf
sum_udf = self.pandas_agg_sum_udf
weighted_mean_udf = self.pandas_agg_weighted_mean_udf
result1 = (df.groupBy('id')
.agg(mean_udf(df.v),
sum_udf(df.v),
weighted_mean_udf(df.v, df.w))
.sort('id')
.toPandas())
expected1 = (df.groupBy('id')
.agg(mean(df.v),
sum(df.v),
mean(df.v).alias('weighted_mean(v, w)'))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
def test_complex_groupby(self):
df = self.data
sum_udf = self.pandas_agg_sum_udf
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
# groupby one expression
result1 = df.groupby(df.v % 2).agg(sum_udf(df.v))
expected1 = df.groupby(df.v % 2).agg(sum(df.v))
# empty groupby
result2 = df.groupby().agg(sum_udf(df.v))
expected2 = df.groupby().agg(sum(df.v))
# groupby one column and one sql expression
result3 = df.groupby(df.id, df.v % 2).agg(sum_udf(df.v)).orderBy(df.id, df.v % 2)
expected3 = df.groupby(df.id, df.v % 2).agg(sum(df.v)).orderBy(df.id, df.v % 2)
# groupby one python UDF
result4 = df.groupby(plus_one(df.id)).agg(sum_udf(df.v))
expected4 = df.groupby(plus_one(df.id)).agg(sum(df.v))
# groupby one scalar pandas UDF
result5 = df.groupby(plus_two(df.id)).agg(sum_udf(df.v))
expected5 = df.groupby(plus_two(df.id)).agg(sum(df.v))
# groupby one expression and one python UDF
result6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum_udf(df.v))
expected6 = df.groupby(df.v % 2, plus_one(df.id)).agg(sum(df.v))
# groupby one expression and one scalar pandas UDF
result7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum_udf(df.v)).sort('sum(v)')
expected7 = df.groupby(df.v % 2, plus_two(df.id)).agg(sum(df.v)).sort('sum(v)')
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
self.assertPandasEqual(expected2.toPandas(), result2.toPandas())
self.assertPandasEqual(expected3.toPandas(), result3.toPandas())
self.assertPandasEqual(expected4.toPandas(), result4.toPandas())
self.assertPandasEqual(expected5.toPandas(), result5.toPandas())
self.assertPandasEqual(expected6.toPandas(), result6.toPandas())
self.assertPandasEqual(expected7.toPandas(), result7.toPandas())
def test_complex_expressions(self):
df = self.data
plus_one = self.python_plus_one
plus_two = self.pandas_scalar_plus_two
sum_udf = self.pandas_agg_sum_udf
# Test complex expressions with sql expression, python UDF and
# group aggregate pandas UDF
result1 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_one(sum_udf(col('v1'))),
sum_udf(plus_one(col('v2'))))
.sort('id')
.toPandas())
expected1 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_one(sum(col('v1'))),
sum(plus_one(col('v2'))))
.sort('id')
.toPandas())
        # Test complex expressions with sql expression, scalar pandas UDF and
        # group aggregate pandas UDF
result2 = (df.withColumn('v1', plus_one(df.v))
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum_udf(col('v')),
sum_udf(col('v1') + 3),
sum_udf(col('v2')) + 5,
plus_two(sum_udf(col('v1'))),
sum_udf(plus_two(col('v2'))))
.sort('id')
.toPandas())
expected2 = (df.withColumn('v1', df.v + 1)
.withColumn('v2', df.v + 2)
.groupby(df.id, df.v % 2)
.agg(sum(col('v')),
sum(col('v1') + 3),
sum(col('v2')) + 5,
plus_two(sum(col('v1'))),
sum(plus_two(col('v2'))))
.sort('id')
.toPandas())
# Test sequential groupby aggregate
result3 = (df.groupby('id')
.agg(sum_udf(df.v).alias('v'))
.groupby('id')
.agg(sum_udf(col('v')))
.sort('id')
.toPandas())
expected3 = (df.groupby('id')
.agg(sum(df.v).alias('v'))
.groupby('id')
.agg(sum(col('v')))
.sort('id')
.toPandas())
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_retain_group_columns(self):
with self.sql_conf({"spark.sql.retainGroupColumns": False}):
df = self.data
sum_udf = self.pandas_agg_sum_udf
result1 = df.groupby(df.id).agg(sum_udf(df.v))
expected1 = df.groupby(df.id).agg(sum(df.v))
self.assertPandasEqual(expected1.toPandas(), result1.toPandas())
def test_array_type(self):
df = self.data
array_udf = pandas_udf(lambda x: [1.0, 2.0], 'array<double>', PandasUDFType.GROUPED_AGG)
result1 = df.groupby('id').agg(array_udf(df['v']).alias('v2'))
self.assertEquals(result1.first()['v2'], [1.0, 2.0])
def test_invalid_args(self):
df = self.data
plus_one = self.python_plus_one
mean_udf = self.pandas_agg_mean_udf
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'nor.*aggregate function'):
df.groupby(df.id).agg(plus_one(df.v)).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'aggregate function.*argument.*aggregate function'):
df.groupby(df.id).agg(mean_udf(mean_udf(df.v))).collect()
with QuietTest(self.sc):
with self.assertRaisesRegexp(
AnalysisException,
'mixture.*aggregate function.*group aggregate pandas UDF'):
df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
def test_register_vectorized_udf_basic(self):
sum_pandas_udf = pandas_udf(
lambda v: v.sum(), "integer", PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
self.assertEqual(sum_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
group_agg_pandas_udf = self.spark.udf.register("sum_pandas_udf", sum_pandas_udf)
self.assertEqual(group_agg_pandas_udf.evalType, PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF)
q = "SELECT sum_pandas_udf(v1) FROM VALUES (3, 0), (2, 0), (1, 1) tbl(v1, v2) GROUP BY v2"
actual = sorted(map(lambda r: r[0], self.spark.sql(q).collect()))
expected = [1, 5]
self.assertEqual(actual, expected)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_agg import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
pratapvardhan/scikit-learn | sklearn/linear_model/tests/test_sag.py | 33 | 28228 | # Authors: Danny Sullivan <[email protected]>
# Tom Dupre la Tour <[email protected]>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import _multinomial_grad_loss_all_samples
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model.base import make_dataset
from sklearn.linear_model.logistic import _multinomial_loss_grad
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import row_norms
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.utils import check_random_state
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
from sklearn.datasets import make_blobs, load_iris
from sklearn.base import clone
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
z = p * y
# approximately equal and saves the computation of the log
if z > 18.0:
return math.exp(-z) * -y
if z < -18.0:
return -y
return -y / (math.exp(z) + 1.0)
def log_loss(p, y):
return np.mean(np.log(1. + np.exp(-y * p)))
# this is used for sag regression
def squared_dloss(p, y):
return p - y
def squared_loss(p, y):
return np.mean(0.5 * (p - y) * (p - y))
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.
return p
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
sample_weight=None, fit_intercept=True):
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(X.shape[1])
sum_gradient = np.zeros(X.shape[1])
gradient_memory = np.zeros((n_samples, n_features))
intercept = 0.0
intercept_sum_gradient = 0.0
intercept_gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
decay = 1.0
seen = set()
# sparse data has a fixed decay of .01
if sparse:
decay = .01
for epoch in range(n_iter):
for k in range(n_samples):
idx = int(rng.rand(1) * n_samples)
# idx = k
entry = X[idx]
seen.add(idx)
p = np.dot(entry, weights) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient + alpha * weights
sum_gradient += update - gradient_memory[idx]
gradient_memory[idx] = update
if fit_intercept:
intercept_sum_gradient += (gradient -
intercept_gradient_memory[idx])
intercept_gradient_memory[idx] = gradient
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
weights -= step_size * sum_gradient / len(seen)
return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
dloss=None, sample_weight=None, sparse=False,
fit_intercept=True):
if step_size * alpha == 1.:
raise ZeroDivisionError("Sparse sag does not handle the case "
"step_size * alpha == 1")
n_samples, n_features = X.shape[0], X.shape[1]
weights = np.zeros(n_features)
sum_gradient = np.zeros(n_features)
last_updated = np.zeros(n_features, dtype=np.int)
gradient_memory = np.zeros(n_samples)
rng = np.random.RandomState(77)
intercept = 0.0
intercept_sum_gradient = 0.0
wscale = 1.0
decay = 1.0
seen = set()
c_sum = np.zeros(n_iter * n_samples)
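    # c_sum[t] holds the cumulative scaled step size after update t; together
    # with last_updated it lets the pending sum_gradient corrections be applied
    # to each coordinate lazily (just in time), which is how this sparse variant
    # avoids touching every coordinate at every step.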
# sparse data has a fixed decay of .01
if sparse:
decay = .01
counter = 0
for epoch in range(n_iter):
for k in range(n_samples):
# idx = k
idx = int(rng.rand(1) * n_samples)
entry = X[idx]
seen.add(idx)
if counter >= 1:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter
p = (wscale * np.dot(entry, weights)) + intercept
gradient = dloss(p, y[idx])
if sample_weight is not None:
gradient *= sample_weight[idx]
update = entry * gradient
sum_gradient += update - (gradient_memory[idx] * entry)
if fit_intercept:
intercept_sum_gradient += gradient - gradient_memory[idx]
intercept -= (step_size * intercept_sum_gradient
/ len(seen) * decay)
gradient_memory[idx] = gradient
wscale *= (1.0 - alpha * step_size)
if counter == 0:
c_sum[0] = step_size / (wscale * len(seen))
else:
c_sum[counter] = (c_sum[counter - 1] +
step_size / (wscale * len(seen)))
if counter >= 1 and wscale < 1e-9:
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
last_updated[j] = counter + 1
c_sum[counter] = 0
weights *= wscale
wscale = 1.0
counter += 1
for j in range(n_features):
if last_updated[j] == 0:
weights[j] -= c_sum[counter - 1] * sum_gradient[j]
else:
weights[j] -= ((c_sum[counter - 1] -
c_sum[last_updated[j] - 1]) *
sum_gradient[j])
weights *= wscale
return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
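    # Reference step sizes: 4 / (L + fit_intercept + 4 * alpha) for the log loss
    # and 1 / (L + fit_intercept + alpha) for the squared loss, where L is the
    # largest squared row norm of X; these are the same values checked against
    # get_auto_step_size in test_get_auto_step_size below.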
if classification:
return (4.0 / (np.max(np.sum(X * X, axis=1))
+ fit_intercept + 4.0 * alpha))
else:
return 1.0 / (np.max(np.sum(X * X, axis=1)) + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
n_samples = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
y[y == 0] = -1
alpha = 1.1
n_iter = 80
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept)
clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
tol=1e-11, C=1. / alpha / n_samples,
max_iter=n_iter, random_state=10)
clf.fit(X, y)
weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
weights = np.atleast_2d(weights)
intercept = np.atleast_1d(intercept)
weights2 = np.atleast_2d(weights2)
intercept2 = np.atleast_1d(intercept2)
assert_array_almost_equal(weights, clf.coef_, decimal=10)
assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
n_samples = 10
n_features = 5
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
alpha = 1.
n_iter = 100
fit_intercept = True
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha * n_samples, max_iter=n_iter)
clf.fit(X, y)
weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
assert_array_almost_equal(weights1, clf.coef_, decimal=10)
assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
assert_array_almost_equal(weights2, clf.coef_, decimal=10)
assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
"""tests if the sag pobj matches log reg"""
n_samples = 100
alpha = 1.0
max_iter = 20
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf2 = clone(clf1)
clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
C=1. / alpha / n_samples, max_iter=max_iter,
random_state=10)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj2, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
"""tests if the sag pobj matches ridge reg"""
n_samples = 100
n_features = 10
alpha = 1.0
n_iter = 100
fit_intercept = False
rng = np.random.RandomState(10)
X = rng.normal(size=(n_samples, n_features))
true_w = rng.normal(size=n_features)
y = X.dot(true_w)
clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
alpha=alpha, max_iter=n_iter, random_state=42)
clf2 = clone(clf1)
clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
alpha=alpha, max_iter=n_iter, random_state=42)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
clf3.fit(X, y)
pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
assert_array_almost_equal(pobj1, pobj2, decimal=4)
assert_array_almost_equal(pobj1, pobj3, decimal=4)
assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
"""tests if the sag regressor is computed correctly"""
alpha = .1
n_features = 10
n_samples = 40
max_iter = 50
tol = .000001
fit_intercept = True
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w) + 2.
step_size = get_step_size(X, alpha, fit_intercept, classification=False)
clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
alpha=alpha * n_samples, max_iter=max_iter)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=max_iter,
dloss=squared_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights1.ravel(),
decimal=3)
assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
# TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
#assert_array_almost_equal(clf2.coef_.ravel(),
# spweights2.ravel(),
# decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
alpha = 1.2
fit_intercept = False
# sum the squares of the second sample because that's the largest
max_squared_sum = 4 + 9 + 16
max_squared_sum_ = row_norms(X, squared=True).max()
assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
for fit_intercept in (True, False):
step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
int(fit_intercept))
step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
fit_intercept)
step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
fit_intercept)
assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
assert_almost_equal(step_size_log, step_size_log_, decimal=4)
msg = 'Unknown loss function for SAG solver, got wrong instead of'
assert_raise_message(ValueError, msg, get_auto_step_size,
max_squared_sum_, alpha, "wrong", fit_intercept)
@ignore_warnings
def test_sag_regressor():
"""tests if the sag regressor performs well"""
xmin, xmax = -5, 5
n_samples = 20
tol = .001
max_iter = 20
alpha = 0.1
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
score2 = clf2.score(X, y)
assert_greater(score1, 0.99)
assert_greater(score2, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
alpha=alpha * n_samples)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
assert_greater(score1, 0.5)
assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
"""tests if the binary classifier is computed correctly"""
alpha = .1
n_samples = 50
n_iter = 50
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
"""tests if the multiclass classifier is computed correctly"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 40
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
dloss=log_dloss, n_iter=max_iter,
sparse=True,
fit_intercept=fit_intercept)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
"""tests if classifier results match target"""
alpha = .1
n_features = 20
n_samples = 10
tol = .01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
"""tests binary classifier with classweights for each class"""
alpha = .1
n_samples = 50
n_iter = 20
tol = .00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: .45, -1: .55}
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=n_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
n_iter=n_iter,
dloss=log_dloss, sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept)
assert_array_almost_equal(clf1.coef_.ravel(),
spweights.ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(),
spweights2.ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
"""tests multiclass with classweights for each class"""
alpha = .1
n_samples = 20
tol = .00001
max_iter = 50
class_weight = {0: .45, 1: .55, 2: .75}
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
max_iter=max_iter, tol=tol, random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(sp.csr_matrix(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
sample_weight = class_weight_[le.fit_transform(y)]
coef1 = []
intercept1 = []
coef2 = []
intercept2 = []
for cl in classes:
y_encoded = np.ones(n_samples)
y_encoded[y != cl] = -1
spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight)
spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
n_iter=max_iter, dloss=log_dloss,
sample_weight=sample_weight,
sparse=True)
coef1.append(spweights1)
intercept1.append(spintercept1)
coef2.append(spweights2)
intercept2.append(spintercept2)
coef1 = np.vstack(coef1)
intercept1 = np.array(intercept1)
coef2 = np.vstack(coef2)
intercept2 = np.array(intercept2)
for i, cl in enumerate(classes):
assert_array_almost_equal(clf1.coef_[i].ravel(),
coef1[i].ravel(),
decimal=2)
assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
assert_array_almost_equal(clf2.coef_[i].ravel(),
coef2[i].ravel(),
decimal=2)
assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
"""tests if ValueError is thrown with only one class"""
X = [[1, 2], [3, 4]]
y = [1, 1]
assert_raise_message(ValueError,
"This solver needs samples of at least 2 classes "
"in the data",
LogisticRegression(solver='sag').fit,
X, y)
def test_step_size_alpha_error():
X = [[0, 0], [0, 0]]
y = [1, -1]
fit_intercept = False
alpha = 1.
msg = ("Current sag implementation does not handle the case"
" step_size * alpha_scaled == 1")
clf1 = LogisticRegression(solver='sag', C=1. / alpha,
fit_intercept=fit_intercept)
assert_raise_message(ZeroDivisionError, msg, clf1.fit, X, y)
clf2 = Ridge(fit_intercept=fit_intercept, solver='sag', alpha=alpha)
assert_raise_message(ZeroDivisionError, msg, clf2.fit, X, y)
def test_multinomial_loss():
# test if the multinomial loss and gradient computations are consistent
X, y = iris.data, iris.target.astype(np.float64)
n_samples, n_features = X.shape
n_classes = len(np.unique(y))
rng = check_random_state(42)
weights = rng.randn(n_features, n_classes)
intercept = rng.randn(n_classes)
sample_weights = rng.randn(n_samples)
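    # make the random sample weights non-negative (np.abs writes back in place)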
np.abs(sample_weights, sample_weights)
# compute loss and gradient like in multinomial SAG
dataset, _ = make_dataset(X, y, sample_weights, random_state=42)
loss_1, grad_1 = _multinomial_grad_loss_all_samples(dataset, weights,
intercept, n_samples,
n_features, n_classes)
# compute loss and gradient like in multinomial LogisticRegression
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
# comparison
assert_array_almost_equal(grad_1, grad_2)
assert_almost_equal(loss_1, loss_2)
def test_multinomial_loss_ground_truth():
# n_samples, n_features, n_classes = 4, 2, 3
n_classes = 3
X = np.array([[1.1, 2.2], [2.2, -4.4], [3.3, -2.2], [1.1, 1.1]])
y = np.array([0, 1, 2, 0])
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
weights = np.array([[0.1, 0.2, 0.3], [1.1, 1.2, -1.3]])
intercept = np.array([1., 0, -.2])
sample_weights = np.array([0.8, 1, 1, 0.8])
prediction = np.dot(X, weights) + intercept
logsumexp_prediction = logsumexp(prediction, axis=1)
p = prediction - logsumexp_prediction[:, np.newaxis]
loss_1 = -(sample_weights[:, np.newaxis] * p * Y_bin).sum()
diff = sample_weights[:, np.newaxis] * (np.exp(p) - Y_bin)
grad_1 = np.dot(X.T, diff)
weights_intercept = np.vstack((weights, intercept)).T.ravel()
loss_2, grad_2, _ = _multinomial_loss_grad(weights_intercept, X, Y_bin,
0.0, sample_weights)
grad_2 = grad_2.reshape(n_classes, -1)
grad_2 = grad_2[:, :-1].T
assert_almost_equal(loss_1, loss_2)
assert_array_almost_equal(grad_1, grad_2)
# ground truth
loss_gt = 11.680360354325961
grad_gt = np.array([[-0.557487, -1.619151, +2.176638],
[-0.903942, +5.258745, -4.354803]])
assert_almost_equal(loss_1, loss_gt)
assert_array_almost_equal(grad_1, grad_gt)
| bsd-3-clause |
datapythonista/pandas | pandas/tests/window/test_groupby.py | 3 | 34849 | import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
to_datetime,
)
import pandas._testing as tm
from pandas.api.indexers import BaseIndexer
from pandas.core.groupby.groupby import get_groupby
class TestRolling:
def setup_method(self):
self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
def test_mutated(self):
msg = r"groupby\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
self.frame.groupby("A", foo=1)
g = self.frame.groupby("A")
assert not g.mutated
g = get_groupby(self.frame, by="A", mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby("A")
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby("A")
r = g.rolling(2, min_periods=0)
g_mutated = get_groupby(self.frame, by="A", mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"f",
[
"sum",
"mean",
"min",
"max",
pytest.param(
"count",
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
"kurt",
"skew",
],
)
def test_rolling(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["std", "var"])
def test_rolling_ddof(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_rolling_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = r.quantile(0.4, interpolation=interpolation)
expected = g.apply(
lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)
)
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["corr", "cov"])
def test_rolling_corr_cov(self, f):
g = self.frame.groupby("A")
r = g.rolling(window=4)
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
# GH 39591: The grouped column should be all np.nan
# (groupby.apply inserts 0s for cov)
expected["A"] = np.nan
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby("A")
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})
g = df.groupby("A")
mi = MultiIndex.from_tuples(
[("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]
)
mi.names = ["A", None]
# Grouped column should not be a part of the output
expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]])
def test_groupby_rolling(self, expected_value, raw_value):
# GH 31754
def foo(x):
return int(isinstance(x, np.ndarray))
df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]})
result = df.groupby("id").value.rolling(1).apply(foo, raw=raw_value)
expected = Series(
[expected_value] * 3,
index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]),
name="value",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_center_center(self):
# GH 35552
series = Series(range(1, 6))
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 5,
index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),
)
tm.assert_series_equal(result, expected)
series = Series(range(1, 5))
result = series.groupby(series).rolling(center=True, window=3).mean()
expected = Series(
[np.nan] * 4,
index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),
)
tm.assert_series_equal(result, expected)
df = DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)})
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],
index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
("a", 2),
("a", 3),
("a", 4),
("b", 5),
("b", 6),
("b", 7),
("b", 8),
("b", 9),
("b", 10),
),
names=["a", None],
),
columns=["b"],
)
tm.assert_frame_equal(result, expected)
df = DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)})
result = df.groupby("a").rolling(center=True, window=3).mean()
expected = DataFrame(
[np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],
index=MultiIndex.from_tuples(
(
("a", 0),
("a", 1),
("a", 2),
("a", 3),
("a", 4),
("b", 5),
("b", 6),
("b", 7),
("b", 8),
("b", 9),
),
names=["a", None],
),
columns=["b"],
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_center_on(self):
# GH 37141
df = DataFrame(
data={
"Date": date_range("2020-01-01", "2020-01-10"),
"gb": ["group_1"] * 6 + ["group_2"] * 4,
"value": range(10),
}
)
result = (
df.groupby("gb")
.rolling(6, on="Date", center=True, min_periods=1)
.value.mean()
)
expected = Series(
[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5],
name="value",
index=MultiIndex.from_tuples(
(
("group_1", Timestamp("2020-01-01")),
("group_1", Timestamp("2020-01-02")),
("group_1", Timestamp("2020-01-03")),
("group_1", Timestamp("2020-01-04")),
("group_1", Timestamp("2020-01-05")),
("group_1", Timestamp("2020-01-06")),
("group_2", Timestamp("2020-01-07")),
("group_2", Timestamp("2020-01-08")),
("group_2", Timestamp("2020-01-09")),
("group_2", Timestamp("2020-01-10")),
),
names=["gb", "Date"],
),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("min_periods", [5, 4, 3])
def test_groupby_rolling_center_min_periods(self, min_periods):
# GH 36040
df = DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)})
window_size = 5
result = (
df.groupby("group")
.rolling(window_size, center=True, min_periods=min_periods)
.mean()
)
result = result.reset_index()[["group", "data"]]
grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]
grp_B_mean = [x + 10.0 for x in grp_A_mean]
num_nans = max(0, min_periods - 3) # For window_size of 5
nans = [np.nan] * num_nans
grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans
grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans
expected = DataFrame(
{"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected}
)
tm.assert_frame_equal(result, expected)
def test_groupby_subselect_rolling(self):
# GH 35486
df = DataFrame(
{"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}
)
result = df.groupby("a")[["b"]].rolling(2).max()
expected = DataFrame(
[np.nan, np.nan, 2.0, np.nan],
columns=["b"],
index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
)
tm.assert_frame_equal(result, expected)
result = df.groupby("a")["b"].rolling(2).max()
expected = Series(
[np.nan, np.nan, 2.0, np.nan],
index=MultiIndex.from_tuples(
((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]
),
name="b",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_custom_indexer(self):
# GH 35557
class SimpleIndexer(BaseIndexer):
def get_window_bounds(
self, num_values=0, min_periods=None, center=None, closed=None
):
min_periods = self.window_size if min_periods is None else 0
end = np.arange(num_values, dtype=np.int64) + 1
start = end.copy() - self.window_size
start[start < 0] = min_periods
return start, end
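                # e.g. with window_size=3 over 5 rows (and min_periods supplied by
                # rolling, so the `else 0` branch applies) this yields
                # start=[0, 0, 0, 1, 2], end=[1, 2, 3, 4, 5] -- a plain trailing
                # window of at most 3 observations, as the assertion below expects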
df = DataFrame(
{"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5
)
result = (
df.groupby(df.index)
.rolling(SimpleIndexer(window_size=3), min_periods=1)
.sum()
)
expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_subset_with_closed(self):
# GH 35549
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": [Timestamp("2019-01-01")] * 6,
}
)
result = (
df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum()
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
index=MultiIndex.from_tuples(
[("A", Timestamp("2019-01-01"))] * 3
+ [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
)
tm.assert_series_equal(result, expected)
def test_groupby_subset_rolling_subset_with_closed(self):
# GH 35549
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": [Timestamp("2019-01-01")] * 6,
}
)
result = (
df.groupby("group")[["column1", "date"]]
.rolling("1D", on="date", closed="left")["column1"]
.sum()
)
expected = Series(
[np.nan, 0.0, 2.0, np.nan, 1.0, 4.0],
index=MultiIndex.from_tuples(
[("A", Timestamp("2019-01-01"))] * 3
+ [("B", Timestamp("2019-01-01"))] * 3,
names=["group", "date"],
),
name="column1",
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["max", "min"])
def test_groupby_rolling_index_changed(self, func):
# GH: #36018 nlevels of MultiIndex changed
ds = Series(
[1, 2, 2],
index=MultiIndex.from_tuples(
[("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"]
),
name="a",
)
result = getattr(ds.groupby(ds).rolling(2), func)()
expected = Series(
[np.nan, np.nan, 2.0],
index=MultiIndex.from_tuples(
[(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"]
),
name="a",
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_empty_frame(self):
# GH 36197
expected = DataFrame({"s1": []})
result = expected.groupby("s1").rolling(window=1).sum()
# GH 32262
expected = expected.drop(columns="s1")
# GH-38057 from_tuples gives empty object dtype, we now get float/int levels
# expected.index = MultiIndex.from_tuples([], names=["s1", None])
expected.index = MultiIndex.from_product(
[Index([], dtype="float64"), Index([], dtype="int64")], names=["s1", None]
)
tm.assert_frame_equal(result, expected)
expected = DataFrame({"s1": [], "s2": []})
result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()
# GH 32262
expected = expected.drop(columns=["s1", "s2"])
expected.index = MultiIndex.from_product(
[
Index([], dtype="float64"),
Index([], dtype="float64"),
Index([], dtype="int64"),
],
names=["s1", "s2", None],
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_string_index(self):
# GH: 36727
df = DataFrame(
[
["A", "group_1", Timestamp(2019, 1, 1, 9)],
["B", "group_1", Timestamp(2019, 1, 2, 9)],
["Z", "group_2", Timestamp(2019, 1, 3, 9)],
["H", "group_1", Timestamp(2019, 1, 6, 9)],
["E", "group_2", Timestamp(2019, 1, 20, 9)],
],
columns=["index", "group", "eventTime"],
).set_index("index")
groups = df.groupby("group")
df["count_to_date"] = groups.cumcount()
rolling_groups = groups.rolling("10d", on="eventTime")
result = rolling_groups.apply(lambda df: df.shape[0])
expected = DataFrame(
[
["A", "group_1", Timestamp(2019, 1, 1, 9), 1.0],
["B", "group_1", Timestamp(2019, 1, 2, 9), 2.0],
["H", "group_1", Timestamp(2019, 1, 6, 9), 3.0],
["Z", "group_2", Timestamp(2019, 1, 3, 9), 1.0],
["E", "group_2", Timestamp(2019, 1, 20, 9), 1.0],
],
columns=["index", "group", "eventTime", "count_to_date"],
).set_index(["group", "index"])
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_no_sort(self):
# GH 36889
result = (
DataFrame({"foo": [2, 1], "bar": [2, 1]})
.groupby("foo", sort=False)
.rolling(1)
.min()
)
expected = DataFrame(
np.array([[2.0, 2.0], [1.0, 1.0]]),
columns=["foo", "bar"],
index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),
)
# GH 32262
expected = expected.drop(columns="foo")
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_count_closed_on(self):
# GH 35869
df = DataFrame(
{
"column1": range(6),
"column2": range(6),
"group": 3 * ["A", "B"],
"date": date_range(end="20190101", periods=6),
}
)
result = (
df.groupby("group")
.rolling("3d", on="date", closed="left")["column1"]
.count()
)
expected = Series(
[np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],
name="column1",
index=MultiIndex.from_tuples(
[
("A", Timestamp("2018-12-27")),
("A", Timestamp("2018-12-29")),
("A", Timestamp("2018-12-31")),
("B", Timestamp("2018-12-28")),
("B", Timestamp("2018-12-30")),
("B", Timestamp("2019-01-01")),
],
names=["group", "date"],
),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
("func", "kwargs"),
[("rolling", {"window": 2, "min_periods": 1}), ("expanding", {})],
)
def test_groupby_rolling_sem(self, func, kwargs):
# GH: 26476
df = DataFrame(
[["a", 1], ["a", 2], ["b", 1], ["b", 2], ["b", 3]], columns=["a", "b"]
)
result = getattr(df.groupby("a"), func)(**kwargs).sem()
expected = DataFrame(
{"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},
index=MultiIndex.from_tuples(
[("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None]
),
)
# GH 32262
expected = expected.drop(columns="a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
("rollings", "key"), [({"on": "a"}, "a"), ({"on": None}, "index")]
)
def test_groupby_rolling_nans_in_index(self, rollings, key):
# GH: 34617
df = DataFrame(
{
"a": to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),
"b": [1, 2, 3],
"c": [1, 1, 1],
}
)
if key == "index":
df = df.set_index("a")
with pytest.raises(ValueError, match=f"{key} must be monotonic"):
df.groupby("c").rolling("60min", **rollings)
@pytest.mark.parametrize("group_keys", [True, False])
def test_groupby_rolling_group_keys(self, group_keys):
# GH 37641
# GH 38523: GH 37641 actually was not a bug.
# group_keys only applies to groupby.apply directly
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
s = Series([1, 2, 3], index=index)
result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean()
expected = Series(
[1.0, 2.0, 3.0],
index=MultiIndex.from_tuples(
[
("val1", "val1", "val1", "val1"),
("val1", "val1", "val1", "val1"),
("val2", "val2", "val2", "val2"),
],
names=["idx1", "idx2", "idx1", "idx2"],
),
)
tm.assert_series_equal(result, expected)
def test_groupby_rolling_index_level_and_column_label(self):
# The groupby keys should not appear as a resulting column
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
df = DataFrame({"A": [1, 1, 2], "B": range(3)}, index=index)
result = df.groupby(["idx1", "A"]).rolling(1).mean()
expected = DataFrame(
{"B": [0.0, 1.0, 2.0]},
index=MultiIndex.from_tuples(
[
("val1", 1, "val1", "val1"),
("val1", 1, "val1", "val1"),
("val2", 2, "val2", "val2"),
],
names=["idx1", "A", "idx1", "idx2"],
),
)
tm.assert_frame_equal(result, expected)
def test_groupby_rolling_resulting_multiindex(self):
# a few different cases checking the created MultiIndex of the result
# https://github.com/pandas-dev/pandas/pull/38057
# grouping by 1 columns -> 2-level MI as result
df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4})
result = df.groupby("b").rolling(3).mean()
expected_index = MultiIndex.from_tuples(
[(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)],
names=["b", None],
)
tm.assert_index_equal(result.index, expected_index)
# grouping by 2 columns -> 3-level MI as result
df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3})
result = df.groupby(["b", "c"]).rolling(2).sum()
expected_index = MultiIndex.from_tuples(
[
(1, 1, 0),
(1, 1, 4),
(1, 1, 8),
(1, 3, 2),
(1, 3, 6),
(1, 3, 10),
(2, 2, 1),
(2, 2, 5),
(2, 2, 9),
(2, 4, 3),
(2, 4, 7),
(2, 4, 11),
],
names=["b", "c", None],
)
tm.assert_index_equal(result.index, expected_index)
# grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result
df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2})
df = df.set_index("c", append=True)
result = df.groupby("b").rolling(3).mean()
expected_index = MultiIndex.from_tuples(
[
(1, 0, 1),
(1, 2, 3),
(1, 4, 1),
(1, 6, 3),
(2, 1, 2),
(2, 3, 4),
(2, 5, 2),
(2, 7, 4),
],
names=["b", None, "c"],
)
tm.assert_index_equal(result.index, expected_index)
def test_groupby_rolling_object_doesnt_affect_groupby_apply(self):
# GH 39732
g = self.frame.groupby("A")
expected = g.apply(lambda x: x.rolling(4).sum()).index
_ = g.rolling(window=4)
result = g.apply(lambda x: x.rolling(4).sum()).index
tm.assert_index_equal(result, expected)
assert not g.mutated
assert not g.grouper.mutated
@pytest.mark.parametrize(
"columns", [MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]]
)
def test_by_column_not_in_values(self, columns):
# GH 32262
df = DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns)
g = df.groupby("A")
original_obj = g.obj.copy(deep=True)
r = g.rolling(4)
result = r.sum()
assert "A" not in result.columns
tm.assert_frame_equal(g.obj, original_obj)
def test_groupby_level(self):
# GH 38523, 38787
arrays = [
["Falcon", "Falcon", "Parrot", "Parrot"],
["Captive", "Wild", "Captive", "Wild"],
]
index = MultiIndex.from_arrays(arrays, names=("Animal", "Type"))
df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index)
result = df.groupby(level=0)["Max Speed"].rolling(2).sum()
expected = Series(
[np.nan, 740.0, np.nan, 50.0],
index=MultiIndex.from_tuples(
[
("Falcon", "Falcon", "Captive"),
("Falcon", "Falcon", "Wild"),
("Parrot", "Parrot", "Captive"),
("Parrot", "Parrot", "Wild"),
],
names=["Animal", "Animal", "Type"],
),
name="Max Speed",
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"by, expected_data",
[
[["id"], {"num": [100.0, 150.0, 150.0, 200.0]}],
[
["id", "index"],
{
"date": [
Timestamp("2018-01-01"),
Timestamp("2018-01-02"),
Timestamp("2018-01-01"),
Timestamp("2018-01-02"),
],
"num": [100.0, 200.0, 150.0, 250.0],
},
],
],
)
def test_as_index_false(self, by, expected_data):
# GH 39433
data = [
["A", "2018-01-01", 100.0],
["A", "2018-01-02", 200.0],
["B", "2018-01-01", 150.0],
["B", "2018-01-02", 250.0],
]
df = DataFrame(data, columns=["id", "date", "num"])
df["date"] = to_datetime(df["date"])
df = df.set_index(["date"])
gp_by = [getattr(df, attr) for attr in by]
result = (
df.groupby(gp_by, as_index=False).rolling(window=2, min_periods=1).mean()
)
expected = {"id": ["A", "A", "B", "B"]}
expected.update(expected_data)
expected = DataFrame(
expected,
index=df.index,
)
tm.assert_frame_equal(result, expected)
class TestExpanding:
def setup_method(self):
self.frame = DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})
@pytest.mark.parametrize(
"f", ["sum", "mean", "min", "max", "count", "kurt", "skew"]
)
def test_expanding(self, f):
g = self.frame.groupby("A")
r = g.expanding()
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["std", "var"])
def test_expanding_ddof(self, f):
g = self.frame.groupby("A")
r = g.expanding()
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]
)
def test_expanding_quantile(self, interpolation):
g = self.frame.groupby("A")
r = g.expanding()
result = r.quantile(0.4, interpolation=interpolation)
expected = g.apply(
lambda x: x.expanding().quantile(0.4, interpolation=interpolation)
)
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("f", ["corr", "cov"])
def test_expanding_corr_cov(self, f):
g = self.frame.groupby("A")
r = g.expanding()
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
# GH 39591: groupby.apply returns 1 instead of nan for windows
# with all nan values
null_idx = list(range(20, 61)) + list(range(72, 113))
expected.iloc[null_idx, 1] = np.nan
# GH 39591: The grouped column should be all np.nan
# (groupby.apply inserts 0s for cov)
expected["A"] = np.nan
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby("A")
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
# groupby.apply doesn't drop the grouped-by column
expected = expected.drop("A", axis=1)
# GH 39732
expected_index = MultiIndex.from_arrays([self.frame["A"], range(40)])
expected.index = expected_index
tm.assert_frame_equal(result, expected)
class TestEWM:
@pytest.mark.parametrize(
"method, expected_data",
[
["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]],
["std", [np.nan, 0.707107, 0.963624, 1.177164]],
["var", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]],
],
)
def test_methods(self, method, expected_data):
# GH 16037
df = DataFrame({"A": ["a"] * 4, "B": range(4)})
result = getattr(df.groupby("A").ewm(com=1.0), method)()
expected = DataFrame(
{"B": expected_data},
index=MultiIndex.from_tuples(
[
("a", 0),
("a", 1),
("a", 2),
("a", 3),
],
names=["A", None],
),
)
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
# There may be a bug in the above statement; not returning the correct index
tm.assert_frame_equal(result.reset_index(drop=True), expected)
@pytest.mark.parametrize(
"method, expected_data",
[["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]],
)
def test_pairwise_methods(self, method, expected_data):
# GH 16037
df = DataFrame({"A": ["a"] * 4, "B": range(4)})
result = getattr(df.groupby("A").ewm(com=1.0), method)()
expected = DataFrame(
{"B": expected_data},
index=MultiIndex.from_tuples(
[
("a", 0, "B"),
("a", 1, "B"),
("a", 2, "B"),
("a", 3, "B"),
],
names=["A", None, None],
),
)
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").apply(lambda x: getattr(x.ewm(com=1.0), method)())
tm.assert_frame_equal(result, expected)
def test_times(self, times_frame):
# GH 40951
halflife = "23 days"
result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
expected = DataFrame(
{
"B": [
0.0,
0.507534,
1.020088,
1.537661,
0.0,
0.567395,
1.221209,
0.0,
0.653141,
1.195003,
]
},
index=MultiIndex.from_tuples(
[
("a", 0),
("a", 3),
("a", 6),
("a", 9),
("b", 1),
("b", 4),
("b", 7),
("c", 2),
("c", 5),
("c", 8),
],
names=["A", None],
),
)
tm.assert_frame_equal(result, expected)
def test_times_vs_apply(self, times_frame):
# GH 40951
halflife = "23 days"
result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
expected = (
times_frame.groupby("A")
.apply(lambda x: x.ewm(halflife=halflife, times="C").mean())
.iloc[[0, 3, 6, 9, 1, 4, 7, 2, 5, 8]]
.reset_index(drop=True)
)
tm.assert_frame_equal(result.reset_index(drop=True), expected)
def test_times_array(self, times_frame):
# GH 40951
halflife = "23 days"
result = times_frame.groupby("A").ewm(halflife=halflife, times="C").mean()
expected = (
times_frame.groupby("A")
.ewm(halflife=halflife, times=times_frame["C"].values)
.mean()
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
anjalisood/spark-tk | regression-tests/sparktkregtests/testcases/frames/ecdf_test.py | 13 | 3032 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tests the ECDF functionality """
import unittest
import random
from sparktkregtests.lib import sparktk_test
class ecdfTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build test frame"""
super(ecdfTest, self).setUp()
# generate a dataset to test ecdf on
# it will just be a single column of ints
column = [[random.randint(0, 5)] for index in xrange(0, 20)]
schema = [("C0", int)]
self.frame = self.context.frame.create(column,
schema=schema)
def validate_ecdf(self):
# call sparktk ecdf function on the data and get as pandas df
ecdf_sparktk_result = self.frame.ecdf("C0")
pd_ecdf = ecdf_sparktk_result.to_pandas(ecdf_sparktk_result.row_count)
# get the original frame as pandas df so we can calculate our own result
pd_original_frame = self.frame.to_pandas(self.frame.row_count)
# the formula for calculating ecdf is
# F(x) = 1/n * sum from 1 to n of I(x_i)
# where I = { 1 if x_i <= x, 0 if x_i > x }
# i.e., for each element in our data column count
# the number of items in that row which are less than
# or equal to that item, divide by the number
# of total items in the column
grouped = pd_original_frame.groupby("C0").size()
our_result = grouped.sort_index().cumsum()*1.0/len(pd_original_frame)
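        # illustrative sketch (not used by the assertions below): for a single
        # value x the same ECDF could be computed straight from the definition,
        #   ecdf_at_x = (pd_original_frame["C0"] <= x).sum() / float(len(pd_original_frame))
        # the groupby/cumsum line above just evaluates this for every distinct x at once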
        # finally we iterate through the sparktk result and compare it with our result
for index, row in pd_ecdf.iterrows():
self.assertAlmostEqual(row["C0"+'_ecdf'],
our_result[int(row["C0"])])
def test_ecdf_bad_name(self):
"""Test ecdf with an invalid column name."""
with self.assertRaisesRegexp(Exception, "No column named bad_name"):
self.frame.ecdf("bad_name")
def test_ecdf_bad_type(self):
"""Test ecdf with an invalid column type."""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.frame.ecdf(5)
def test_ecdf_none(self):
"""Test ecdf with a None for the column name."""
with self.assertRaisesRegexp(Exception, "column is required"):
self.frame.ecdf(None)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pochoi/SHTOOLS | examples/python/ClassInterface/ClassExample.py | 2 | 1931 | #!/usr/bin/env python
"""
This script tests the python class interface
"""
from __future__ import division
from __future__ import print_function
# standard imports:
import os
import sys
import numpy as np
import matplotlib as mpl
# import shtools:
sys.path.append(os.path.join(os.path.dirname(__file__), "../../.."))
from pyshtools import SHCoeffs
# set shtools plot style:
sys.path.append(os.path.join(os.path.dirname(__file__), "../Common"))
from FigStyle import style_shtools
mpl.rcParams.update(style_shtools)
#==== MAIN FUNCTION ====
def main():
example1()
example2()
def example1():
# generate random spherical harmonics coefficients with a given
# power spectrum and plot its bandspectrum
degrees = np.arange(201)
scale = 10
power = 1. / (1. + (degrees / scale)**2)**2
coeffs = SHCoeffs.from_random(power)
coeffs.plot_powerperband(show=False, fname='power.png')
# expand coefficients into a spatial grid and plot it
grid1 = coeffs.expand(kind='DH1')
grid1.plot_rawdata(show=False, fname='DHGrid_unrotated.png')
# rotate coefficients, expand to grid and plot again
coeffs.rotate(40., 0., 0., degrees=True)
grid2 = coeffs.expand(kind='DH1')
grid2.plot_rawdata(show=False, fname='DHGrid_rotated.png')
def example2():
# generate random spherical harmonics coefficients with a given
# power spectrum and plot its bandspectrum
degrees = np.arange(201)
scale = 10
power = 1. / (1. + (degrees / scale)**2)**2
coeffs = SHCoeffs.from_random(power)
coeffs.plot_powerperband(show=False, fname='power.png')
# expand coefficients into two different spatial grids and plot it
grid1 = coeffs.expand(kind='GLQ')
grid1.plot_rawdata(show=False, fname='GLQGrid.png')
grid2 = coeffs.expand(kind='DH1')
    grid2.plot_rawdata(show=False, fname='DHGrid.png')
#==== EXECUTE SCRIPT ====
if __name__ == "__main__":
main()
| bsd-3-clause |
ronw/siplca-segmentation | plot_pages.py | 1 | 3604 | # Copyright (C) 2009 Ron J. Weiss ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tkSimpleDialog
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
def plot_pages(funcs, draw_pager_controls=True):
"""Sets up a figure with controls for browsing across multiple "pages"
Each entry of the funcs array should be a function that takes no
arguments and draws the contents of a particular page.
E.g. plot_pages([lambda: plt.plot(x, y),
lambda: plt.imshow(np.random.rand(100,100))])
will set up a figure containing plot(x,y) and controls that allow
the user to switch to the next page. When the "Next" button is clicked,
the figure will be redrawn to contain a random 100x100 pixel image.
Note that the funcs[i] is called every time the user switches to
page i. So in the previous example, the random image will change
every time page 2 is redrawn (by interacting with the paging
controls, not by underlying GUI updates).
"""
return _plot_page(funcs, 0, draw_pager_controls)
def _plot_page(funcs, curr_page, draw_pager_controls=True):
isinteractive = plt.isinteractive()
if isinteractive:
plt.ioff()
plt.clf()
h = funcs[curr_page]()
if draw_pager_controls and len(funcs) > 1:
_add_pager_controls(funcs, curr_page)
plt.draw()
if isinteractive:
plt.ion()
return h
def _add_pager_controls(funcs, curr_page):
npages = len(funcs)
bpos = np.asarray([0.0125, 0.0125, 0.06, 0.06])
pos = bpos
_create_pager_button(pos, 'First',
lambda ev: _plot_page(funcs, 0),
curr_page != 0)
pos += [bpos[2], 0, 0, 0]
_create_pager_button(pos, 'Prev',
lambda ev: _plot_page(funcs, curr_page - 1),
curr_page > 0)
pos += [bpos[2], 0, 0.05, 0]
def open_page_dialog():
page = tkSimpleDialog.askinteger('Jump to page',
'Jump to page (1 -- %d)' % npages)
if not page or page < 0 or page > npages:
page = curr_page + 1
return page - 1
_create_pager_button(pos, '%d / %d' % (curr_page+1, npages),
lambda ev: _plot_page(funcs, open_page_dialog()),
True)
pos += [bpos[2], 0, -0.05, 0]
_create_pager_button(pos, 'Next',
lambda ev: _plot_page(funcs, curr_page + 1),
curr_page < npages - 1)
pos += [pos[2], 0, 0, 0]
_create_pager_button(pos, 'Last',
lambda ev: _plot_page(funcs, npages - 1),
curr_page != npages - 1)
def _create_pager_button(pos, label, fun, enabled=True):
disabled_color = '#999999'
ax = plt.axes(pos)
b = mpl.widgets.Button(ax, label)
if enabled:
b.on_clicked(fun)
else:
b.color = disabled_color
b.hovercolor = b.color
| gpl-3.0 |
AdrienGuille/TOM | tom_lib/nlp/topic_model.py | 1 | 13558 | # coding: utf-8
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
import tom_lib.stats
from scipy import spatial, cluster
from scipy.sparse import coo_matrix
from sklearn.decomposition import NMF, LatentDirichletAllocation as LDA
# the optional 'lda' package (Gibbs sampling) is imported lazily in LatentDirichletAllocation.infer_topics
from tom_lib.structure.corpus import Corpus
__author__ = "Adrien Guille, Pavel Soriano"
__email__ = "[email protected]"
class TopicModel(object):
__metaclass__ = ABCMeta
def __init__(self, corpus):
self.corpus = corpus # a Corpus object
self.document_topic_matrix = None # document x topic matrix
self.topic_word_matrix = None # topic x word matrix
self.nb_topics = None # a scalar value > 1
@abstractmethod
def infer_topics(self, num_topics=10, **kwargs):
pass
def greene_metric(self, min_num_topics=10, step=5, max_num_topics=50, top_n_words=10, tao=10):
"""
Implements Greene metric to compute the optimal number of topics. Taken from How Many Topics?
Stability Analysis for Topic Models from Greene et al. 2014.
        :param step: Increment between successive numbers of topics to test
:param min_num_topics: Minimum number of topics to test
:param max_num_topics: Maximum number of topics to test
:param top_n_words: Top n words for topic to use
:param tao: Number of sampled models to build
:return: A list of len (max_num_topics - min_num_topics) with the stability of each tested k
"""
stability = []
# Build reference topic model
# Generate tao topic models with tao samples of the corpus
for k in range(min_num_topics, max_num_topics + 1, step):
self.infer_topics(k)
reference_rank = [list(zip(*self.top_words(i, top_n_words)))[0] for i in range(k)]
agreement_score_list = []
for t in range(tao):
tao_corpus = Corpus(source_file_path=self.corpus._source_file_path,
language=self.corpus._language,
n_gram=self.corpus._n_gram,
vectorization=self.corpus._vectorization,
max_relative_frequency=self.corpus._max_relative_frequency,
min_absolute_frequency=self.corpus._min_absolute_frequency,
sample=True)
tao_model = type(self)(tao_corpus)
tao_model.infer_topics(k)
tao_rank = [next(zip(*tao_model.top_words(i, top_n_words))) for i in range(k)]
agreement_score_list.append(tom_lib.stats.agreement_score(reference_rank, tao_rank))
stability.append(np.mean(agreement_score_list))
return stability
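    # Hypothetical usage sketch for greene_metric (assumes an already built Corpus
    # instance named `corpus`; the model class comes from this module):
    #   model = NonNegativeMatrixFactorization(corpus)
    #   stability = model.greene_metric(min_num_topics=10, step=5, max_num_topics=30)
    #   best_k = 10 + 5 * int(np.argmax(stability))  # highest average agreement wins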
def arun_metric(self, min_num_topics=10, max_num_topics=50, iterations=10):
"""
Implements Arun metric to estimate the optimal number of topics:
Arun, R., V. Suresh, C. V. Madhavan, and M. N. Murthy
On finding the natural number of topics with latent dirichlet allocation: Some observations.
In PAKDD (2010), pp. 391–402.
:param min_num_topics: Minimum number of topics to test
:param max_num_topics: Maximum number of topics to test
:param iterations: Number of iterations per value of k
:return: A list of len (max_num_topics - min_num_topics) with the average symmetric KL divergence for each k
"""
kl_matrix = []
for j in range(iterations):
kl_list = []
l = np.array([sum(self.corpus.vector_for_document(doc_id)) for doc_id in range(self.corpus.size)]) # document length
norm = np.linalg.norm(l)
for i in range(min_num_topics, max_num_topics + 1):
self.infer_topics(i)
c_m1 = np.linalg.svd(self.topic_word_matrix.todense(), compute_uv=False)
c_m2 = l.dot(self.document_topic_matrix.todense())
c_m2 += 0.0001 # we need this to prevent components equal to zero
c_m2 /= norm
kl_list.append(tom_lib.stats.symmetric_kl(c_m1.tolist(), c_m2.tolist()[0]))
kl_matrix.append(kl_list)
        output = np.array(kl_matrix)
        return output.mean(axis=0)
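    # For the Arun et al. metric, lower is better: the candidate k with the smallest
    # averaged symmetric KL divergence is the estimate, e.g. (hypothetical `model`)
    #   divergences = model.arun_metric(min_num_topics=5, max_num_topics=25)
    #   best_k = 5 + int(np.argmin(divergences))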
def brunet_metric(self, min_num_topics=10, max_num_topics=50, iterations=10):
"""
Implements a consensus-based metric to estimate the optimal number of topics:
Brunet, J.P., Tamayo, P., Golub, T.R., Mesirov, J.P.
Metagenes and molecular pattern discovery using matrix factorization.
Proc. National Academy of Sciences 101(12) (2004), pp. 4164–4169
:param min_num_topics:
:param max_num_topics:
:param iterations:
:return:
"""
cophenetic_correlation = []
for i in range(min_num_topics, max_num_topics+1):
average_C = np.zeros((self.corpus.size, self.corpus.size))
for j in range(iterations):
self.infer_topics(i)
for p in range(self.corpus.size):
for q in range(self.corpus.size):
if self.most_likely_topic_for_document(p) == self.most_likely_topic_for_document(q):
average_C[p, q] += float(1./iterations)
clustering = cluster.hierarchy.linkage(average_C, method='average')
Z = cluster.hierarchy.dendrogram(clustering, orientation='right')
index = Z['leaves']
average_C = average_C[index, :]
average_C = average_C[:, index]
(c, d) = cluster.hierarchy.cophenet(Z=clustering, Y=spatial.distance.pdist(average_C))
# plt.clf()
# f, ax = plt.subplots(figsize=(11, 9))
# ax = sns.heatmap(average_C)
# plt.savefig('reorderedC.png')
cophenetic_correlation.append(c)
return cophenetic_correlation
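    # For the Brunet et al. consensus metric, a higher cophenetic correlation means the
    # document clustering is more reproducible across runs; a common heuristic is to
    # keep the largest k reached before the correlation starts to drop sharply.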
def print_topics(self, num_words=10, sort_by_freq=''):
frequency = self.topics_frequency()
topic_list = []
for topic_id in range(self.nb_topics):
word_list = []
for weighted_word in self.top_words(topic_id, num_words):
word_list.append(weighted_word[0])
topic_list.append((topic_id, frequency[topic_id], word_list))
if sort_by_freq == 'asc':
topic_list.sort(key=lambda x: x[1], reverse=False)
elif sort_by_freq == 'desc':
topic_list.sort(key=lambda x: x[1], reverse=True)
for topic_id, frequency, topic_desc in topic_list:
print('topic %d\t%f\t%s' % (topic_id, frequency, ' '.join(topic_desc)))
def top_words(self, topic_id, num_words):
vector = self.topic_word_matrix[topic_id]
cx = vector.tocoo()
weighted_words = [()] * len(self.corpus.vocabulary)
for row, word_id, weight in itertools.zip_longest(cx.row, cx.col, cx.data):
weighted_words[word_id] = (self.corpus.word_for_id(word_id), weight)
weighted_words.sort(key=lambda x: x[1], reverse=True)
return weighted_words[:num_words]
def top_documents(self, topic_id, num_docs):
vector = self.document_topic_matrix[:, topic_id]
cx = vector.tocoo()
weighted_docs = [()] * self.corpus.size
for doc_id, topic_id, weight in itertools.zip_longest(cx.row, cx.col, cx.data):
weighted_docs[doc_id] = (doc_id, weight)
weighted_docs.sort(key=lambda x: x[1], reverse=True)
return weighted_docs[:num_docs]
def word_distribution_for_topic(self, topic_id):
vector = self.topic_word_matrix[topic_id].toarray()
return vector[0]
def topic_distribution_for_document(self, doc_id):
vector = self.document_topic_matrix[doc_id].toarray()
return vector[0]
def topic_distribution_for_word(self, word_id):
vector = self.topic_word_matrix[:, word_id].toarray()
return vector.T[0]
def topic_distribution_for_author(self, author_name):
all_weights = []
for document_id in self.corpus.documents_by_author(author_name):
all_weights.append(self.topic_distribution_for_document(document_id))
output = np.array(all_weights)
return output.mean(axis=0)
def most_likely_topic_for_document(self, doc_id):
weights = list(self.topic_distribution_for_document(doc_id))
return weights.index(max(weights))
def topic_frequency(self, topic, date=None):
return self.topics_frequency(date=date)[topic]
def topics_frequency(self, date=None):
frequency = np.zeros(self.nb_topics)
if date is None:
ids = range(self.corpus.size)
else:
ids = self.corpus.doc_ids(date)
for i in ids:
topic = self.most_likely_topic_for_document(i)
frequency[topic] += 1.0 / len(ids)
return frequency
def documents_for_topic(self, topic_id):
doc_ids = []
for doc_id in range(self.corpus.size):
most_likely_topic = self.most_likely_topic_for_document(doc_id)
if most_likely_topic == topic_id:
doc_ids.append(doc_id)
return doc_ids
def documents_per_topic(self):
topic_associations = {}
for i in range(self.corpus.size):
topic_id = self.most_likely_topic_for_document(i)
if topic_associations.get(topic_id):
documents = topic_associations[topic_id]
documents.append(i)
topic_associations[topic_id] = documents
else:
documents = [i]
topic_associations[topic_id] = documents
return topic_associations
def affiliation_repartition(self, topic_id):
counts = {}
doc_ids = self.documents_for_topic(topic_id)
for i in doc_ids:
affiliations = set(self.corpus.affiliation(i))
for affiliation in affiliations:
if counts.get(affiliation) is not None:
count = counts[affiliation] + 1
counts[affiliation] = count
else:
counts[affiliation] = 1
tuples = []
for affiliation, count in counts.items():
tuples.append((affiliation, count))
tuples.sort(key=lambda x: x[1], reverse=True)
return tuples
class LatentDirichletAllocation(TopicModel):
def infer_topics(self, num_topics=10, algorithm='variational', **kwargs):
self.nb_topics = num_topics
lda_model = None
topic_document = None
if algorithm == 'variational':
lda_model = LDA(n_components=num_topics, learning_method='batch')
topic_document = lda_model.fit_transform(self.corpus.sklearn_vector_space)
elif algorithm == 'gibbs':
            import lda  # optional dependency; only needed for Gibbs sampling
            lda_model = lda.LDA(n_topics=num_topics, n_iter=500)
topic_document = lda_model.fit_transform(self.corpus.sklearn_vector_space)
else:
raise ValueError("algorithm must be either 'variational' or 'gibbs', got '%s'" % algorithm)
self.topic_word_matrix = []
self.document_topic_matrix = []
vocabulary_size = len(self.corpus.vocabulary)
row = []
col = []
data = []
for topic_idx, topic in enumerate(lda_model.components_):
for i in range(vocabulary_size):
row.append(topic_idx)
col.append(i)
data.append(topic[i])
self.topic_word_matrix = coo_matrix((data, (row, col)),
shape=(self.nb_topics, len(self.corpus.vocabulary))).tocsr()
row = []
col = []
data = []
doc_count = 0
for doc in topic_document:
topic_count = 0
for topic_weight in doc:
row.append(doc_count)
col.append(topic_count)
data.append(topic_weight)
topic_count += 1
doc_count += 1
self.document_topic_matrix = coo_matrix((data, (row, col)),
shape=(self.corpus.size, self.nb_topics)).tocsr()
class NonNegativeMatrixFactorization(TopicModel):
def infer_topics(self, num_topics=10, **kwargs):
self.nb_topics = num_topics
nmf = NMF(n_components=num_topics)
topic_document = nmf.fit_transform(self.corpus.sklearn_vector_space)
self.topic_word_matrix = []
self.document_topic_matrix = []
vocabulary_size = len(self.corpus.vocabulary)
row = []
col = []
data = []
for topic_idx, topic in enumerate(nmf.components_):
for i in range(vocabulary_size):
row.append(topic_idx)
col.append(i)
data.append(topic[i])
self.topic_word_matrix = coo_matrix((data, (row, col)),
shape=(self.nb_topics, len(self.corpus.vocabulary))).tocsr()
row = []
col = []
data = []
doc_count = 0
for doc in topic_document:
topic_count = 0
for topic_weight in doc:
row.append(doc_count)
col.append(topic_count)
data.append(topic_weight)
topic_count += 1
doc_count += 1
self.document_topic_matrix = coo_matrix((data, (row, col)),
shape=(self.corpus.size, self.nb_topics)).tocsr()
| mit |
lukauskas/scipy | scipy/integrate/quadrature.py | 26 | 27908 | from __future__ import division, print_function, absolute_import
__all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb',
'cumtrapz','newton_cotes']
from scipy.special.orthogonal import p_roots
from scipy.special import gammaln
from numpy import sum, ones, add, diff, isinf, isscalar, \
asarray, real, trapz, arange, empty
import numpy as np
import math
import warnings
from scipy._lib.six import xrange
class AccuracyWarning(Warning):
pass
def _cached_p_roots(n):
"""
    Cache p_roots results to speed up multiple calls of the fixed_quad function.
"""
if n in _cached_p_roots.cache:
return _cached_p_roots.cache[n]
_cached_p_roots.cache[n] = p_roots(n)
return _cached_p_roots.cache[n]
_cached_p_roots.cache = dict()
def fixed_quad(func,a,b,args=(),n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
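    Examples
    --------
    A short illustrative sketch (assumes ``numpy`` is available as ``np``):
    >>> from scipy import integrate
    >>> f = lambda x: x**8
    >>> val, _ = integrate.fixed_quad(f, 0.0, 1.0, n=5)
    >>> np.allclose(val, 1/9.0)  # the n-point rule is exact for degree <= 2*n - 1
    True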
"""
[x,w] = _cached_p_roots(n)
x = real(x)
ainf, binf = map(isinf,(a,b))
if ainf or binf:
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0*sum(w*func(y,*args),0), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if isscalar(x):
return func(x, *args)
x = asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
if hasattr(y0, 'dtype'):
output = empty((n,), dtype=y0.dtype)
else:
output = empty((n,), dtype=type(y0))
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
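    Examples
    --------
    A short illustrative sketch (assumes ``numpy`` is available as ``np``):
    >>> from scipy import integrate
    >>> val, err = integrate.quadrature(np.cos, 0.0, np.pi/2)
    >>> np.allclose(val, 1.0)  # integral of cos over [0, pi/2] is exactly 1
    True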
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
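# Example (illustrative): tupleset((slice(None), slice(None)), 1, 0) == (slice(None), 0),
# i.e. replace one element of an indexing tuple while leaving the others unchanged.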
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
    dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, uses this value as the first value in the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = asarray(y)
if x is None:
d = dx
else:
x = asarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = add.accumulate(d * (y[slice1] + y[slice2]) / 2.0, axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res],
axis=axis)
return res
def _basic_simps(y,start,stop,x,dx,axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
all = (slice(None),)*nd
slice0 = tupleset(all, axis, slice(start, stop, step))
slice1 = tupleset(all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(all, axis, slice(start+2, stop+2, step))
    if x is None:  # Evenly spaced Simpson's rule.
result = add.reduce(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = diff(x,axis=axis)
sl0 = tupleset(all, axis, slice(start, stop, step))
sl1 = tupleset(all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
result = add.reduce(hsum/6.0*(y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1)),axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
    dx : float, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
    even : {'avg', 'first', 'last'}, optional
        'avg' : Average two results: 1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
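    Examples
    --------
    A short illustrative sketch (assumes ``numpy`` is available as ``np``):
    >>> from scipy import integrate
    >>> x = np.linspace(0, 2, 5)
    >>> np.allclose(integrate.simps(x**2, x), 8/3.0)  # exact for this low-order polynomial
    True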
"""
y = asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = asarray(x)
if len(x.shape) == 1:
shapex = ones(nd)
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be 'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y,0,N-3,x,dx,axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y,1,N-2,x,dx,axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y,0,N-2,x,dx,axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
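    Examples
    --------
    A short illustrative sketch (assumes ``numpy`` is available as ``np``):
    >>> from scipy import integrate
    >>> x = np.linspace(0, 2, 2**4 + 1)   # 2**k + 1 equally-spaced samples
    >>> np.allclose(integrate.romb(x**2, dx=x[1] - x[0]), 8/3.0)
    True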
"""
y = asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
all = (slice(None),) * nd
slice0 = tupleset(all, axis, 0)
slicem1 = tupleset(all, axis, -1)
h = Ninterv*asarray(dx)*1.0
R[(0,0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = all
start = stop = step = Ninterv
for i in range(1,k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start,stop,step))
step >>= 1
R[(i,0)] = 0.5*(R[(i-1,0)] + h*add.reduce(y[slice_R],axis))
for j in range(1,i+1):
R[(i,j)] = R[(i,j-1)] + \
(R[(i,j-1)]-R[(i-1,j-1)]) / ((1 << (2*j))-1)
h = h / 2.0
if show:
if not isscalar(R[(0,0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%" + str(width) + '.' + str(precis)+'f'
print("\n Richardson Extrapolation Table for Romberg Integration ")
print("====================================================================")
for i in range(0,k+1):
for j in range(0,i+1):
print(formstr % R[(i,j)], end=' ')
print()
print("====================================================================\n")
return R[(k,k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * arange(0, numtosum)
s = sum(function(points),0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in range(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in range(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e. whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if isinf(a) or isinf(b):
raise ValueError("Romberg integration only available for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a,b]
intrange = b-a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
for i in xrange(1, divmax+1):
n = n * 2
ordsum = ordsum + _difftrap(vfunc, interval, n)
resmat.append([])
resmat[i].append(intrange * ordsum / n)
for k in range(i):
resmat[i].append(_romberg_diff(resmat[i-1][k], resmat[i][k], k+1))
result = resmat[i][i]
lastresult = resmat[i-1][i-1]
err = abs(result - lastresult)
if err < tol or err < rtol*abs(result):
break
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
_builtincoeffs = {
1:(1,2,[1,1],-1,12),
2:(1,3,[1,4,1],-1,90),
3:(3,8,[1,3,3,1],-3,80),
4:(2,45,[7,32,12,32,7],-8,945),
5:(5,288,[19,75,50,50,75,19],-275,12096),
6:(1,140,[41,216,27,272,27,216,41],-9,1400),
7:(7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8:(4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9:(9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10:(5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11:(11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12:(1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13:(13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14:(7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i)
+ B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)`
where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}`
    is the average sample spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
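    Examples
    --------
    A short illustrative sketch (Simpson's 3/8 rule, i.e. the equally-spaced
    N = 3 case; assumes ``numpy`` is available as ``np``):
    >>> from scipy.integrate import newton_cotes
    >>> an, B = newton_cotes(3, 1)
    >>> np.allclose(an, [0.375, 1.125, 1.125, 0.375])
    True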
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
return na*np.array(vi,float)/da, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2.0*yi - 1
nvec = np.arange(0,N+1)
C = ti**nvec[:,np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = np.dot(Cinv[:,::2],vec) * N/2
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
| bsd-3-clause |
jjx02230808/project0223 | examples/svm/plot_svm_nonlinear.py | 268 | 1091 | """
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is a XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
| bsd-3-clause |
google/fuzzbench | analysis/test_coverage_data_utils.py | 1 | 3522 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for coverage_data_utils.py"""
import pandas as pd
import pandas.testing as pd_test
from analysis import coverage_data_utils
def create_coverage_data():
"""Utility function to create test data."""
return {
"afl libpng-1.2.56": [[0, 0, 1, 1], [0, 0, 2, 2], [0, 0, 3, 3]],
"libfuzzer libpng-1.2.56": [[0, 0, 1, 1], [0, 0, 2, 3], [0, 0, 3, 3],
[0, 0, 4, 4]]
}
def test_get_unique_region_dict():
"""Tests get_unique_region_dict() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
expected_dict = {
(0, 0, 2, 2): ['afl'],
(0, 0, 2, 3): ['libfuzzer'],
(0, 0, 4, 4): ['libfuzzer']
}
assert expected_dict == unique_region_dict
def test_get_unique_region_cov_df():
"""Tests get_unique_region_cov_df() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
unique_region_dict = coverage_data_utils.get_unique_region_dict(
benchmark_coverage_dict)
fuzzer_names = ['afl', 'libfuzzer']
unique_region_df = coverage_data_utils.get_unique_region_cov_df(
unique_region_dict, fuzzer_names)
unique_region_df = unique_region_df.sort_values(by=['fuzzer']).reset_index(
drop=True)
expected_df = pd.DataFrame([{
'fuzzer': 'afl',
'unique_regions_covered': 1
}, {
'fuzzer': 'libfuzzer',
'unique_regions_covered': 2
}])
assert unique_region_df.equals(expected_df)
def test_get_benchmark_cov_dict():
"""Tests that get_benchmark_cov_dict() returns correct dictionary."""
coverage_dict = create_coverage_data()
benchmark = 'libpng-1.2.56'
benchmark_cov_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, benchmark)
expected_cov_dict = {
"afl": {(0, 0, 3, 3), (0, 0, 2, 2), (0, 0, 1, 1)},
"libfuzzer": {(0, 0, 4, 4), (0, 0, 3, 3), (0, 0, 2, 3), (0, 0, 1, 1)}
}
assert expected_cov_dict == benchmark_cov_dict
def test_get_pairwise_unique_coverage_table():
"""Tests that get_pairwise_unique_coverage_table() gives the
correct dataframe."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
fuzzers = ['libfuzzer', 'afl']
table = coverage_data_utils.get_pairwise_unique_coverage_table(
benchmark_coverage_dict, fuzzers)
expected_table = pd.DataFrame([[0, 1], [2, 0]],
index=fuzzers,
columns=fuzzers)
pd_test.assert_frame_equal(table, expected_table)
| apache-2.0 |
TariqAHassan/BioVida | biovida/genomics/disgenet_interface.py | 1 | 8839 | # coding: utf-8
"""
DisGeNET Interface
~~~~~~~~~~~~~~~~~~
"""
import os
import requests
import pandas as pd
# Tool to create required caches
from biovida.support_tools._cache_management import package_cache_creator
# BioVida Support Tools
from biovida.support_tools.support_tools import header, camel_to_snake_case, list_to_bulletpoints
# BioVida Printing Tools
from biovida.support_tools.printing import dict_pprint
# ---------------------------------------------------------------------------------------------
# DisGeNET Reference Data
# ---------------------------------------------------------------------------------------------
_disgenet_delimited_databases = {
# Source: http://www.disgenet.org/web/DisGeNET/menu/downloads#curated
# Structure: {database_short_name: {full_name: ..., url: ..., description: ..., number_of_rows_in_header: ...}}
'all': {
'full_name': 'All Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/all_gene_disease_associations.tsv.gz',
'description': 'The file contains all gene-disease associations in DisGeNET.',
'header': 21
},
'curated': {
'full_name': 'Curated Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/curated_gene_disease_associations.tsv.gz',
'description': 'The file contains gene-disease associations from UNIPROT, CTD (human subset), ClinVar, Orphanet,'
' and the GWAS Catalog.',
'header': 21
},
'snp_disgenet': {
'full_name': 'All SNP-Gene-Disease Associations',
'url': 'http://www.disgenet.org/ds/DisGeNET/results/all_snps_sentences_pubmeds.tsv.gz',
'description': 'All SNP-gene-disease associations.',
'header': 20
},
}
# ---------------------------------------------------------------------------------------------
# Tools for Harvesting DisGeNET Data
# ---------------------------------------------------------------------------------------------
class DisgenetInterface(object):
"""
Python Interface for Harvesting Databases from `DisGeNET <http://www.disgenet.org/>`_.
:param cache_path: location of the BioVida cache. If one does not exist in this location, one will created.
Default to ``None`` (which will generate a cache in the home folder).
:type cache_path: ``str`` or ``None``
:param verbose: If ``True``, print notice when downloading database. Defaults to ``True``.
:type verbose: ``bool``
"""
@staticmethod
def _disgenet_readme(created_gene_dirs):
"""
Writes the DisGeNET README to disk.
:param created_gene_dirs: the dictionary of directories returned by ``_package_cache_creator()``
:type created_gene_dirs: ``dict``
"""
save_address = os.path.join(created_gene_dirs['disgenet'], 'DisGeNET_README.txt')
if not os.path.isfile(save_address):
readme_url = 'http://www.disgenet.org/ds/DisGeNET/results/readme.txt'
r = requests.get(readme_url, stream=True)
with open(save_address, 'wb') as f:
f.write(r.content)
header("The DisGeNET README has been downloaded to:\n\n {0}\n\n"
"Please take the time to review this document.".format(save_address),
flank=False)
def __init__(self, cache_path=None, verbose=True):
"""
Initialize the ``DisgenetInterface()`` Class.
"""
self._verbose = verbose
# Cache Creation
ppc = package_cache_creator(sub_dir='genomics',
cache_path=cache_path,
to_create=['disgenet'],
verbose=verbose)
self.root_path, self._created_gene_dirs = ppc
# Check if a readme exists.
self._disgenet_readme(self._created_gene_dirs)
# Containers for the most recently requested database.
self.current_database = None
self.current_database_name = None
self.current_database_full_name = None
self.current_database_description = None
@staticmethod
def _disgenet_delimited_databases_key_error(database):
"""
        Raises an error when a reference is made to a database not in `_disgenet_delimited_databases.keys()`.
:param database: `erroneous` database reference.
:type database: ``str``
"""
if database not in _disgenet_delimited_databases:
raise ValueError(
"'{0}' is an invalid value for `database`.\n`database` must be one of:\n{1}".format(
str(database), list_to_bulletpoints(_disgenet_delimited_databases.keys())))
def options(self, database=None, pretty_print=True):
"""
        List the DisGeNET databases which can be downloaded,
        as well as additional information about the databases.
:param database: A database to review. Must be one of: 'all', 'curated', 'snp_disgenet' or ``None``.
If a specific database is given, the database's full name and description will be provided.
If ``None``, a list of databases which can be downloaded will be returned (or printed).
Defaults to ``None``.
:type database: ``str``
:param pretty_print: pretty print the information. Defaults to True.
:type pretty_print: ``bool``
:return: a ``list`` if `database` is ``None``, else a ``dict`` with the database's full name and description.
:rtype: ``list`` or ``dict``
"""
if database is None:
info = list(_disgenet_delimited_databases.keys())
elif database in _disgenet_delimited_databases:
info = {k: v for k, v in _disgenet_delimited_databases[database].items()
if k in ['full_name', 'description']}
else:
self._disgenet_delimited_databases_key_error(database)
if pretty_print:
if database is None:
print("Available Databases:\n")
print(list_to_bulletpoints(info))
else:
dict_pprint(info)
else:
return info
@staticmethod
def _df_clean(data_frame):
"""
        Clean the dataframe generated by ``pull()``.
        :param data_frame: the dataframe to clean.
:type data_frame: ``Pandas DataFrame``
:return: see description.
:rtype: ``Pandas DataFrame``
"""
# Lower to make easier to match in the future
data_frame['diseaseName'] = data_frame['diseaseName'].map(
lambda x: x.lower() if isinstance(x, str) else x, na_action='ignore')
data_frame.columns = list(map(camel_to_snake_case, data_frame.columns))
return data_frame
def pull(self, database, download_override=False):
"""
Pull (i.e., download) a DisGeNET Database.
Note: if a database is already cached, it will be used instead of downloading
        (the `download_override` argument can be used to override this behaviour).
:param database: A database to download. Must be one of: 'all', 'curated', 'snp_disgenet' or ``None``.
See ``options()`` for more information.
:type database: ``str``
:param download_override: If ``True``, override any existing database currently cached and download a new one.
Defaults to ``False``.
:type download_override: ``bool``
:return: a DisGeNET database
:rtype: ``Pandas DataFrame``
"""
self._disgenet_delimited_databases_key_error(database)
db_url = _disgenet_delimited_databases[database]['url']
save_name = "{0}.p".format(db_url.split("/")[-1].split(".")[0])
save_address = os.path.join(self._created_gene_dirs['disgenet'], save_name)
if download_override or not os.path.isfile(save_address):
if self._verbose:
header("Downloading DisGeNET Database... ", flank=False)
data_frame = pd.read_csv(db_url,
sep='\t',
header=_disgenet_delimited_databases[database]['header'],
compression='gzip')
self._df_clean(data_frame).to_pickle(save_address)
else:
data_frame = pd.read_pickle(save_address)
# Cache the database
self.current_database = data_frame
self.current_database_name = database
self.current_database_full_name = _disgenet_delimited_databases[database]['full_name']
self.current_database_description = _disgenet_delimited_databases[database]['description']
return data_frame
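# A minimal usage sketch (illustrative; pulled databases are cached on disk, so
# subsequent `pull()` calls for the same database avoid a new download):
#
#     from biovida.genomics.disgenet_interface import DisgenetInterface
#     dna = DisgenetInterface()
#     dna.options()              # list the databases that can be pulled
#     dna.options('curated')     # full name and description of one database
#     df = dna.pull('curated')   # Pandas DataFrame of gene-disease associations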
| bsd-3-clause |
fako/datascope | src/core/utils/tests/data.py | 1 | 14820 | from mock import patch, Mock
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from django.test import TestCase
from core.utils.data import NumericFeaturesFrame
from core.models import Collective, Individual
from core.exceptions import DSFileLoadError
class TestNumericFeaturesFrame(TestCase):
fixtures = ["test-organisms"]
def setUp(self):
super().setUp()
self.test_fixture = Collective.objects.get(id=2)
self.test_records = [
{
"is_dutch": 1.0,
"is_english": 0.0,
"value_number": 1.0
},
{
"is_dutch": 1.0,
"is_english": 0.0,
"value_number": 2.0
},
{
"is_dutch": 1.0,
"is_english": 0.0,
"value_number": 1.0
},
{
"is_dutch": 0.0,
"is_english": 1.0,
"value_number": 1.0
},
{
"is_dutch": 0.0,
"is_english": 1.0,
"value_number": 2.0
}
]
test_frame = pd.DataFrame.from_records(self.test_records, index=[4, 5, 6, 7, 8])
test_frame = (test_frame - test_frame.min()) / (test_frame.max() - test_frame.min())
self.test_frame = test_frame.fillna(0)
self.test_records_extra = [
{
"is_dutch": 0.0,
"is_english": 0.0,
"value_number": 1.0
},
{
"is_dutch": 0.0,
"is_english": 0.0,
"value_number": 2.0
}
]
test_frame_extra = pd.DataFrame.from_records(self.test_records + self.test_records_extra,
index=[4, 5, 6, 7, 8, 9, 10])
test_frame_extra = (test_frame_extra - test_frame_extra.min()) / \
(test_frame_extra.max() - test_frame_extra.min())
self.test_frame_extra = test_frame_extra.fillna(0)
self.features = [
TestNumericFeaturesFrame.is_dutch,
TestNumericFeaturesFrame.is_english,
TestNumericFeaturesFrame.value_number
]
self.frame = NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
self.features,
self.get_iterator
)
self.extra_individuals = [
Individual.objects.create(
id=9,
properties={
'country': 'FR',
'language': 'fr',
'value': '1',
'word': 'pension'
},
community=self.test_fixture.community,
collective=self.test_fixture
),
Individual.objects.create(
id=10,
properties={
'country': 'FR',
'language': 'fr',
'value': '2',
'word': 'pension'
},
community=self.test_fixture.community,
collective=self.test_fixture
)
]
@staticmethod
def get_identifier(test):
return test.id
def get_iterator(self):
"""
Returns content that is already in fixtures
"""
return self.test_fixture.individual_set.filter(id__lt=9).iterator()
def get_extra_iterator(self):
"""
Returns content that is created in setUp
"""
return iter(self.extra_individuals)
@staticmethod
def is_dutch(test):
return float(test["language"] == "nl")
@staticmethod
def is_english(test):
return int(test["language"] == "en") # NB: features should return floats, but ints are allowed
@staticmethod
def value_number(test):
return test["value"]
@staticmethod
def invalid_arguments():
return 0.0
@staticmethod
def invalid_return(test):
return "invalid"
@staticmethod
def set_language_to_fr(test):
test["language"] = "fr"
return 0.0
def test_init(self):
sorted_feature_names = ["is_dutch", "is_english", "value_number"]
self.assertEqual(
sorted(self.frame.features.keys()),
sorted_feature_names
)
self.assertTrue(callable(self.frame.content))
assert_frame_equal(self.frame.data, self.test_frame, check_like=True)
def test_init_invalid_features(self):
features = [
TestNumericFeaturesFrame.invalid_arguments
]
try:
NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features,
self.get_iterator
)
self.fail("NumericFeaturesFrame did not raise with invalid feature")
except Exception as exc:
self.assertEqual(
str(exc),
"invalid_arguments feature: TypeError: invalid_arguments() takes 0 positional arguments but 1 was given"
)
features = [
TestNumericFeaturesFrame.invalid_return
]
try:
NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features,
self.get_iterator
)
self.fail("NumericFeaturesFrame did not raise with invalid feature return value")
except ValueError as exc:
self.assertEqual(
str(exc),
"invalid_return feature did not return float but <class 'str'>"
)
def test_init_immutable_content(self):
content = list(self.get_iterator())
features = [
TestNumericFeaturesFrame.set_language_to_fr
]
try:
NumericFeaturesFrame(
self.get_identifier,
features,
lambda: content
)
self.fail("NumericFeaturesFrame did not raise when features modified content")
except ValueError:
pass
def test_init_file(self):
with patch("core.utils.data.numeric_features.NumericFeaturesFrame.from_disk", return_value=self.test_frame) as \
from_disk_patch:
frame = NumericFeaturesFrame(
self.get_identifier,
self.features,
file_path="test/path/to/frame.pkl"
)
sorted_feature_names = ["is_dutch", "is_english", "value_number"]
self.assertEqual(
sorted(frame.features.keys()),
sorted_feature_names
)
from_disk_patch.assert_called_once_with("test/path/to/frame.pkl")
def test_to_disk(self):
self.frame.data.to_pickle = Mock()
self.frame.to_disk("test/path/to/frame.pkl")
self.frame.data.to_pickle.assert_called_once_with('test/path/to/frame.pkl')
def test_from_disk(self):
with patch("core.utils.data.numeric_features.pd.read_pickle", return_value=self.test_frame) as pandas_patch:
self.frame.from_disk("test/path/to/frame.pkl")
pandas_patch.assert_called_once_with("test/path/to/frame.pkl")
assert_frame_equal(self.frame.data, self.test_frame, check_like=True)
def test_from_disk_invalid(self):
self.test_frame["extra"] = self.test_frame["is_dutch"]
with patch("core.utils.data.numeric_features.pd.read_pickle", return_value=self.test_frame) as pandas_patch:
try:
self.frame.from_disk("test/path/to/frame.pkl")
self.fail("NumericFeatureFrame.from_disk did not raise an assertion when loading too much data")
except DSFileLoadError as exc:
pass
pandas_patch.assert_called_once_with("test/path/to/frame.pkl")
self.test_frame.drop("is_dutch", axis=1)
with patch("core.utils.data.numeric_features.pd.read_pickle", return_value=self.test_frame) as pandas_patch:
try:
self.frame.from_disk("test/path/to/frame.pkl")
self.fail("NumericFeatureFrame.from_disk did not raise an assertion when loading wrong data")
except DSFileLoadError:
pass
pandas_patch.assert_called_once_with("test/path/to/frame.pkl")
self.test_frame.drop("extra", axis=1)
with patch("core.utils.data.numeric_features.pd.read_pickle", return_value=self.test_frame) as pandas_patch:
try:
self.frame.from_disk("test/path/to/frame.pkl")
self.fail("NumericFeatureFrame.from_disk did not raise an assertion when loading too little data")
except DSFileLoadError:
pass
pandas_patch.assert_called_once_with("test/path/to/frame.pkl")
def test_adding_features(self):
features = [
TestNumericFeaturesFrame.is_dutch
]
frame = NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features,
self.get_iterator
)
frame.load_features([
TestNumericFeaturesFrame.value_number,
TestNumericFeaturesFrame.is_english
])
assert_frame_equal(frame.data, self.test_frame, check_like=True)
sorted_feature_names = ["is_dutch", "is_english", "value_number"]
self.assertEqual(
sorted(self.frame.features.keys()),
sorted_feature_names
)
def test_adding_content(self):
self.frame.load_content(self.get_extra_iterator)
assert_frame_equal(self.frame.data, self.test_frame_extra, check_like=True)
def test_adding_content_mixed(self):
self.skipTest("Bug: GH-109")
old = list(self.get_iterator())[-2:]
def update(ind):
ind.properties["value"] = int(ind.properties["value"]) * 5
return ind
updated = list(map(update, old))
self.frame.load_content(
lambda: iter(list(self.get_extra_iterator()) + updated)
)
self.test_frame_extra["value_number"].loc[[7, 8]] *= 5
assert_frame_equal(self.frame.data, self.test_frame_extra, check_like=True)
def test_resetting_features_and_content(self):
features = [
TestNumericFeaturesFrame.is_dutch
]
frame = NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features,
self.get_iterator
)
frame.reset(
features=[
TestNumericFeaturesFrame.value_number,
TestNumericFeaturesFrame.is_english
],
content=self.get_extra_iterator
)
self.test_frame_extra = self.test_frame_extra.drop([4, 5, 6, 7, 8], axis=0)
self.test_frame_extra = self.test_frame_extra.drop(labels="is_dutch", axis=1)
assert_frame_equal(frame.data, self.test_frame_extra, check_like=True)
sorted_feature_names = ["is_english", "value_number"]
self.assertEqual(
sorted(frame.features.keys()),
sorted_feature_names
)
def test_resetting_features(self):
features = [
TestNumericFeaturesFrame.is_dutch
]
frame = NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features,
self.get_iterator
)
frame.reset(features=[
TestNumericFeaturesFrame.value_number,
TestNumericFeaturesFrame.is_english
])
self.test_frame = self.test_frame.drop(labels="is_dutch", axis=1)
assert_frame_equal(frame.data, self.test_frame, check_like=True)
sorted_feature_names = ["is_english", "value_number"]
self.assertEqual(
sorted(frame.features.keys()),
sorted_feature_names
)
def test_resetting_features_no_content(self):
features = [
TestNumericFeaturesFrame.is_dutch
]
frame = NumericFeaturesFrame(
TestNumericFeaturesFrame.get_identifier,
features
)
frame.reset(features=[
TestNumericFeaturesFrame.value_number,
TestNumericFeaturesFrame.is_english
])
self.test_frame = self.test_frame.drop(labels="is_dutch", axis=1)
assert_frame_equal(frame.data, self.test_frame[0:0], check_like=True)
sorted_feature_names = ["is_english", "value_number"]
self.assertEqual(
sorted(frame.features.keys()),
sorted_feature_names
)
def test_resetting_content(self):
self.frame.reset(content=self.get_extra_iterator)
self.test_frame_extra = self.test_frame_extra.drop([4, 5, 6, 7, 8], axis=0)
assert_frame_equal(self.frame.data, self.test_frame_extra, check_like=True)
def test_resetting_content_no_features(self):
self.frame.features = None
self.frame.reset(content=self.get_extra_iterator)
self.assertEqual(self.frame.content.__name__, self.get_extra_iterator.__name__) # TODO: better equality test
assert_frame_equal(self.frame.data, pd.DataFrame(dtype=np.float), check_like=True)
def test_clean_params(self):
test_params = {
"is_dutch": "1", # get converted to float
"is_french": 1.0, # gets skipped
"$is_french": 1.0, # gets skipped (without errors)
"value_number": None, # gets skipped a a non-numeric
"is_english": "test", # gets skipped as a non-numeric
"$value_number": 2.0
}
for function in [str, int, float]:
test_params["is_dutch"] = function(test_params["is_dutch"])
cleaned_params = self.frame.clean_params(test_params)
self.assertEqual(cleaned_params, {"is_dutch": 1.0, "value_number": 2.0})
test_error_params = {
"is_dutch": "1",
"$is_dutch": 1.0,
}
try:
self.frame.clean_params(test_error_params)
self.fail("Clean params should have raised for invalid params")
except ValueError:
pass
def test_rank_by_params(self):
ranking = self.frame.rank_by_params({"is_dutch": 1, "value_number": 1})
self.assertEqual(ranking, [5, 8, 6, 4, 7])
ranking = self.frame.rank_by_params({"is_dutch": 0.5, "value_number": -1, "is_english": 2, "is_french": 100})
self.assertEqual(ranking, [7, 8, 6, 4, 5])
def test_get_content_hash(self):
self.skipTest("not tested")
def test_get_feature_value(self):
self.skipTest("not tested")
def test_get_feature_series(self):
self.skipTest("not tested")
| gpl-3.0 |
crukci-bioinformatics/claritypy-ngsreports | legacy/audit_clarity.py | 2 | 16330 | """
Python script create_billing_summary
Created by Anne Pajon under user 'pajon01' on 09/06/2016
"""
import csv
import json
import numpy as np
import os
import pandas as pd
import shutil
import string
import subprocess
import logging
import argparse
from dateutil.relativedelta import *
from dateutil.parser import *
# logging configuration
import log as logger
# import site specific configurations
from config import CONFIG
import glsclient.glsclient as glsclient
def get_svn_version(file):
cmd = ["svn", "info", file]
process = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = process.communicate()[0]
lines = out.split('\n')
info_found = False
version = ''
for line in lines:
if line.startswith('Name:'):
version += line.split(': ')[1] + ' '
if line.startswith('Revision:'):
version += 'r' + line.split(': ')[1] + ' '
info_found = True
if line.startswith('Last Changed Date:'):
version += line.split(': ')[1]
if not info_found:
version += 'N/A'
return version
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = '%s%%' % n
return percentile_
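# Example (illustrative): `percentile(25)` returns an aggregation function whose
# __name__ is '25%', so pandas labels the column accordingly in grouped summaries,
# e.g. df.groupby('type').agg({'yield': [np.median, percentile(25), percentile(75)]})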
class AuditReport(object):
def __init__(self, config, gls_server, this_month, prices_filename, query_filename, data_filename, start_month=None):
self.log = logging.getLogger('ngsreports')
self.config = config
self.gls = glsclient.GlsUtil(server=gls_server)
self.this_month = this_month
self.next_month = (parse(self.this_month) + relativedelta(months=+1)).strftime('%Y-%m')
self.data_basedir = '%s_%s' % (this_month.replace('-', ''), self.config.get('data', 'data_basedir'))
if start_month:
self.this_month = start_month
self.data_basedir = '%s_cumulative_%s' % (this_month.replace('-', ''), self.config.get('data', 'data_basedir'))
# --- Loading pricing -----------------------------------------------------
self.prices = self.get_prices(os.path.join(self.config.get('pricing', 'prices_basedir'), prices_filename))
# --- Loading billing -----------------------------------------------------
self.query_file = os.path.join(self.config.get('query', 'query_basedir'), query_filename)
self.data_file = os.path.join(self.data_basedir, data_filename)
self.audit_file = os.path.join(self.data_basedir, 'audit-' + data_filename.replace('.csv', '.txt'))
self.finance_file = os.path.join(self.data_basedir, 'finance-' + data_filename)
self.billing_records = self.get_billing_records()
# --- Create audit reports ------------------------------------------------
self.report, self.cost_for_billable_records = self.audit_clarity()
def get_prices(self, prices_file):
self.log.info('==============================================================')
self.log.info('>>> Loading pricing information from file %s' % prices_file)
prices = pd.read_csv(prices_file, sep='\t')
prices = pd.melt(prices, id_vars=['Type'],
value_vars=['Total Price', 'Consumables Only', 'Ad hoc (x1.5)', 'Commercial (x1.5)'])
prices['version'] = get_svn_version(prices_file)
return prices
def get_billing_records(self):
self.log.info('==============================================================')
if not os.path.exists(self.data_file):
self.log.info('>>> Retrieving billing records from Clarity LiMS')
with open(self.query_file) as f:
billing_query = string.Template(f.read()).safe_substitute(
{'thismonth': self.this_month, 'nextmonth': self.next_month})
output_query_file = os.path.join(self.data_basedir, os.path.basename(self.query_file))
with open(output_query_file, 'w') as o:
o.write(billing_query)
self.gls.db.execute(billing_query)
billing_data = self.gls.db.fetchall()
self.log.debug(billing_data)
if not os.path.exists(os.path.dirname(self.data_file)):
os.makedirs(os.path.dirname(self.data_file))
with open(self.data_file, 'w') as f:
billing_column_names = []
for d in self.gls.db.description:
billing_column_names.append(d.name)
w = csv.DictWriter(f, billing_column_names, delimiter=';')
w.writeheader()
w.writerows(billing_data)
try:
self.log.info('>>> Loading billing information from file %s' % self.data_file)
billing_records = pd.read_csv(self.data_file, sep=';')
billing_month = 'N/A'
if len(billing_records) > 0:
billing_month = billing_records['billingmonth'][0]
self.log.info('>>> %s records from Clarity LiMS retrieved for %s' % (len(billing_records), billing_month))
return billing_records
except ValueError, e:
self.log.error('Empty file', e)
return
def audit_clarity(self):
pd.set_option('display.precision', 3)
pd.set_option('max_colwidth', 80)
billing_month = 'N/A'
if len(self.billing_records) > 0:
billing_month = self.billing_records['billingmonth'][0]
report = '==============================================================\n'
report += '--------------------------------------------------------------\n'
report += '--- CLARITY BILLING AUDIT REPORT - %s -------------------\n' % billing_month
report += '--------------------------------------------------------------\n'
report += '--- Billing Summary ------------------------------------------\n'
report += '>>> Total number of records: %s record(s)\n' % len(self.billing_records)
by_type = self.billing_records.pivot_table(index='type', columns='billable', values='description', aggfunc='count')
if not by_type.empty:
by_type.loc['Total'] = by_type.sum()
report += by_type.to_string() + '\n'
report += '--------------------------------------------------------------\n'
non_billable_records = self.billing_records[self.billing_records['billable'] == 'Do not bill']
report += '>>> Total number of non billable records: %s record(s)\n' % len(non_billable_records)
report += non_billable_records.to_string() + '\n'
report += '--------------------------------------------------------------\n'
billable_records = self.billing_records[self.billing_records['billable'] == 'Bill']
report += '>>> Total number of billable records: %s record(s)\n' % len(billable_records)
report += '--------------------------------------------------------------\n'
by_external = billable_records.pivot_table(index='type', columns='external', values='description',
aggfunc='count')
if not by_external.empty:
by_external.loc['Total'] = by_external.sum()
report += '>>> Distribution of Internal vs External billable records\n'
report += by_external.to_string() + '\n'
if 'yield' in billable_records.columns:
report += '--------------------------------------------------------------\n'
summary = billable_records[['type', 'yield']].groupby('type').agg(
{'yield': ['count', np.sum, np.mean, np.min, np.max, np.median, percentile(25), percentile(75)]})
summary.loc['Total'] = summary.sum()
report += '>>> Distribution of yield across billable records\n'
report += summary.to_string() + '\n'
report += '--------------------------------------------------------------\n'
missing_billable_info = billable_records[billable_records['billable'].isnull()]
report += '>>> Billable records without billable status: %s record(s)\n' % len(missing_billable_info)
if len(missing_billable_info) > 0:
report += missing_billable_info.to_string() + '\n'
report += '--------------------------------------------------------------\n'
duplicated_records = billable_records[billable_records.duplicated(['description'], keep=False)]
report += '>>> Duplicated billable records: %s record(s)\n' % len(duplicated_records)
if len(duplicated_records) > 0:
report += duplicated_records.to_string() + '\n'
report += '==============================================================\n'
report += '--------------------------------------------------------------\n'
report += '--- CLARITY BILLING SUMMARY REPORT - %s -----------------\n' % billing_month
report += '--------------------------------------------------------------\n'
by_lab = billable_records.pivot_table(index='lab', columns='type', values='description', aggfunc='count')
by_lab.fillna(0, inplace=True)
by_lab['Total'] = by_lab.sum(axis=1)
by_lab.loc['Total'] = by_lab.sum()
report += by_lab.to_string() + '\n'
report += '--------------------------------------------------------------\n'
cost_for_billable_records = pd.merge(billable_records, self.prices, left_on=['type', 'pricing'],
right_on=['Type', 'variable'], how='left')
cost_by_lab = cost_for_billable_records.pivot_table(index='lab', columns='type', values='value', aggfunc='sum')
cost_by_lab.fillna(0, inplace=True)
cost_by_lab['Total'] = cost_by_lab.sum(axis=1)
cost_by_lab.loc['Total'] = cost_by_lab.sum()
report += cost_by_lab.to_string() + '\n'
report += '==============================================================\n'
report += '--------------------------------------------------------------\n'
report += '--- PRICING TABLE - %s ----------------------------------\n' % billing_month
report += '--------------------------------------------------------------\n'
report += self.prices.to_string() + '\n'
return report, cost_for_billable_records
def write_files(self):
with open(self.audit_file, 'w') as f:
f.write(self.report)
with open(self.finance_file, 'w') as f:
f.write(self.cost_for_billable_records.to_csv())
def get_platform_data(seq_institute_data, platform='HiSeq'):
only_this_platform = seq_institute_data[(seq_institute_data.type.str.startswith(platform))][['lab', 'type']]
return only_this_platform.groupby([only_this_platform.lab], as_index=False).agg(np.size)
def create_html_reports(log, data_basedir, seq_records, lps_records, cumulative=None):
template = os.path.join(CONFIG.get('report', 'html_template_basedir'),
CONFIG.get('report', 'institute_html_template_filename'))
billing_month = ''
if len(seq_records) > 0:
billing_months = list(set(seq_records.billingmonth))
billing_month = sorted(billing_months)[0]
if cumulative:
billing_month = '%s-cumulative' % billing_month
for institute in set(list(seq_records.institute) + list(lps_records.institute)):
log.info('---> Generating HTML report for %s' % institute)
# Sequencing data -----------------------------------------------------
seq_institute_data = seq_records[(seq_records.institute == institute)]
seq_institute_data.fillna(0, inplace=True)
seq_records['thisinstitute'] = np.where(seq_records['institute'] == institute, 'in', 'out')
by_institute = seq_records.pivot_table(index='type', columns='thisinstitute', values='description',
aggfunc='count')
by_institute.fillna(0, inplace=True)
by_type = seq_institute_data.pivot_table(index='type', columns='lab', values='description', aggfunc='count')
if len(seq_institute_data) > 0:
by_type.loc['Total'] = by_type.sum()
by_institute['type'] = by_institute.index
categories = json.dumps(by_institute['type'].astype(str).values.tolist())
in_data = '[]'
sum_in_data = 0
if len(seq_institute_data) > 0:
in_data = (by_institute['in'].values.tolist())
sum_in_data = sum(in_data)
out_data = (by_institute['out'].values.tolist())
hiseq_data = get_platform_data(seq_institute_data, 'HiSeq').values.tolist()
miseq_data = get_platform_data(seq_institute_data, 'MiSeq').values.tolist()
seq_table = json.dumps(seq_institute_data[
['lab', 'researcher', 'slxid', 'type', 'flowcellid', 'lane', 'yield', 'billable',
'billingmonth', 'value', 'billingcode']].astype(str).values.tolist())
# Library prep data ---------------------------------------------------
lps_institute_data = lps_records[(lps_records.institute == institute)]
lps_institute_data.fillna(0, inplace=True)
lps_institute_data['costpersample'] = lps_institute_data['value']
by_samples = lps_institute_data.groupby(
['lab', 'researcher', 'slxid', 'type', 'costpersample', 'billingmonth', 'billingcode'],
as_index=False).agg({'samplename': np.size, 'value': np.sum})
lps_table = json.dumps(by_samples[['lab', 'researcher', 'slxid', 'samplename', 'type', 'billingmonth',
'costpersample', 'value', 'billingcode']].astype(str).values.tolist())
# Writing HTML report per institute -----------------------------------
with open(template) as t:
output_file = os.path.join(data_basedir, '%s-%s.html' % (billing_month, institute.replace('/', '').replace(' ', '').replace('-', '').lower()))
with open(output_file, 'w') as f:
f.write(string.Template(t.read()).safe_substitute(
{'categories': categories, 'institute': institute, 'date': billing_month,
'institute_data': in_data, 'others_data': out_data, 'institute_capacity': sum_in_data,
'others_capacity': sum(out_data), 'hiseq': hiseq_data, 'miseq': miseq_data,
'billing_table': seq_table, 'lps_billing_table': lps_table}))
def main():
# command line options
parser = argparse.ArgumentParser()
parser.add_argument("--month", dest="this_month", action="store", help="The year and month for generating audit reports e.g. '2017-01'", required=True)
parser.add_argument("--cumulative", dest="start_month", action="store", help="The year and month for cumulative audit reports e.g. '2016-04'")
options = parser.parse_args()
if options.start_month:
log = logger.get_custom_logger(os.path.join('%s_cumulative_%s' % (options.this_month.replace('-', ''), CONFIG.get('data', 'data_basedir')), CONFIG.get('logging', 'logfile')))
else:
log = logger.get_custom_logger(os.path.join('%s_%s' % (options.this_month.replace('-', ''), CONFIG.get('data', 'data_basedir')), CONFIG.get('logging', 'logfile')))
log.info('Running Audit Clarity from %s script for %s' % (os.path.basename(__file__), options.this_month))
try:
# Create billing and audit reports for sequencing
seq = AuditReport(CONFIG, glsclient.SERVER, options.this_month, CONFIG.get('pricing', 'seq_prices_filename'), CONFIG.get('query', 'seq_query_filename'), CONFIG.get('data', 'seq_data_filename'), options.start_month)
seq.write_files()
# and for library prep
lps = AuditReport(CONFIG, glsclient.SERVER, options.this_month, CONFIG.get('pricing', 'lps_prices_filename'), CONFIG.get('query', 'lps_query_filename'), CONFIG.get('data', 'lps_data_filename'), options.start_month)
lps.write_files()
# Create institute HTML reports
create_html_reports(log, seq.data_basedir, seq.cost_for_billable_records, lps.cost_for_billable_records, options.start_month)
    except Exception as e:
log.exception("Unexpected error")
log.exception(e)
if __name__ == '__main__':
main()
| mit |
murali-munna/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/tests/io/parser/test_read_fwf.py | 7 | 15261 | # -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame
from pandas import compat
from pandas.compat import StringIO, BytesIO
from pandas.io.parsers import read_csv, read_fwf, EmptyDataError
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
        # File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
| agpl-3.0 |
benthomasson/stdeb | stdeb/util.py | 3 | 60220 | #
# This module contains most of the code of stdeb.
#
import re, sys, os, shutil, select
import codecs
try:
# Python 2.x
import ConfigParser
except ImportError:
# Python 3.x
import configparser as ConfigParser
import subprocess
import tempfile
import stdeb
from stdeb import log, __version__ as __stdeb_version__
if hasattr(os,'link'):
link_func = os.link
else:
# matplotlib deletes link from os namespace, expected distutils workaround
link_func = shutil.copyfile
__all__ = ['DebianInfo','build_dsc','expand_tarball','expand_zip',
'stdeb_cmdline_opts','stdeb_cmd_bool_opts','recursive_hardlink',
'apply_patch','repack_tarball_with_debianized_dirname',
'expand_sdist_file','stdeb_cfg_options']
DH_MIN_VERS = '7' # Fundamental to stdeb >= 0.4
DH_IDEAL_VERS = '7.4.3' # fixes Debian bug 548392
PYTHON_ALL_MIN_VERS = '2.6.6-3'
try:
# Python 2.x
from exceptions import Exception
except ImportError:
# Python 3.x
pass
class CalledProcessError(Exception): pass
class CantSatisfyRequirement(Exception): pass
def check_call(*popenargs, **kwargs):
retcode = subprocess.call(*popenargs, **kwargs)
if retcode == 0:
return
raise CalledProcessError(retcode)
if sys.version_info[0]==2:
help_str_py2='If True, build package for python 2. (Default=True).'
help_str_py3='If True, build package for python 3. (Default=False).'
else:
assert sys.version_info[0]==3
help_str_py2='If True, build package for python 2. (Default=False).'
help_str_py3='If True, build package for python 3. (Default=True).'
stdeb_cmdline_opts = [
('dist-dir=', 'd',
"directory to put final built distributions in (default='deb_dist')"),
('patch-already-applied','a',
'patch was already applied (used when py2dsc calls sdist_dsc)'),
('default-distribution=', None,
"deprecated (see --suite)"),
('suite=', 'z',
"distribution name to use if not specified in .cfg (default='unstable')"),
('default-maintainer=', None,
'deprecated (see --maintainer)'),
('maintainer=', 'm',
'maintainer name and email to use if not specified in .cfg '
'(default from setup.py)'),
('extra-cfg-file=','x',
'additional .cfg file (in addition to stdeb.cfg if present)'),
('patch-file=','p',
'patch file applied before setup.py called '
'(incompatible with file specified in .cfg)'),
('patch-level=','l',
'patch file applied before setup.py called '
'(incompatible with file specified in .cfg)'),
('patch-posix','q',
'apply the patch with --posix mode'),
('remove-expanded-source-dir','r',
'remove the expanded source directory'),
('ignore-install-requires', 'i',
'ignore the requirements from requires.txt in the egg-info directory'),
('pycentral-backwards-compatibility=',None,
'This option has no effect, is here for backwards compatibility, and may '
'be removed someday.'),
('workaround-548392=',None,
'This option has no effect, is here for backwards compatibility, and may '
'be removed someday.'),
('no-backwards-compatibility',None,
'This option has no effect, is here for backwards compatibility, and may '
'be removed someday.'),
('guess-conflicts-provides-replaces=',None,
'If True, attempt to guess Conflicts/Provides/Replaces in debian/control '
'based on apt-cache output. (Default=False).'),
('with-python2=',None,
help_str_py2),
('with-python3=',None,
help_str_py3),
('no-python2-scripts=',None,
'If True, do not install scripts for python 2. (Default=False).'),
('no-python3-scripts=',None,
'If True, do not install scripts for python 3. (Default=False).'),
('force-x-python3-version',None,
'Override default minimum python3:any dependency with value from '
'x-python3-version'),
('allow-virtualenv-install-location',None,
'Allow installing into /some/random/virtualenv-path'),
('sign-results',None,
'Use gpg to sign the resulting .dsc and .changes file'),
]
# old entries from stdeb.cfg:
# These should be settable as distutils command options, but in case
# we want to support other packaging methods, they should also be
# settable outside distutils. Consequently, we keep the ability to
# parse ConfigParser files (specified with --extra-cfg-file). TODO:
# Also, some (most, in fact) of the above options should also be
# settable in the ConfigParser file.
stdeb_cfg_options = [
# With defaults
('source=',None,
'debian/control Source: (Default: <source-debianized-setup-name>)'),
('package=',None,
'debian/control Package: (Default: python-<debianized-setup-name>)'),
('package3=',None,
'debian/control Package: (Default: python3-<debianized-setup-name>)'),
('suite=',None,
'suite (e.g. stable, lucid) in changelog (Default: unstable)'),
('maintainer=',None,
'debian/control Maintainer: (Default: <setup-maintainer-or-author>)'),
('debian-version=',None,'debian version (Default: 1)'),
('section=',None,'debian/control Section: (Default: python)'),
# With no defaults
('epoch=',None,'version epoch'),
('forced-upstream-version=',None,'forced upstream version'),
('upstream-version-prefix=',None,'upstream version prefix'),
('upstream-version-suffix=',None,'upstream version suffix'),
('uploaders=',None,'uploaders'),
('copyright-file=',None,'copyright file'),
('build-depends=',None,'debian/control Build-Depends:'),
('build-conflicts=',None,'debian/control Build-Conflicts:'),
('stdeb-patch-file=',None,'file containing patches for stdeb to apply'),
('stdeb-patch-level=',None,'patch level provided to patch command'),
('depends=',None,'debian/control Depends:'),
('depends3=',None,'debian/control Depends:'),
('suggests=',None,'debian/control Suggests:'),
('suggests3=',None,'debian/control Suggests:'),
('recommends=',None,'debian/control Recommends:'),
('recommends3=',None,'debian/control Recommends:'),
('xs-python-version=',None,'debian/control XS-Python-Version:'),
('x-python3-version=',None,'debian/control X-Python3-Version:'),
('dpkg-shlibdeps-params=',None,'parameters passed to dpkg-shlibdeps'),
('conflicts=',None,'debian/control Conflicts:'),
('conflicts3=',None,'debian/control Conflicts:'),
('provides=',None,'debian/control Provides:'),
('provides3=',None,'debian/control Provides3:'),
('replaces=',None,'debian/control Replaces:'),
('replaces3=',None,'debian/control Replaces3:'),
('mime-desktop-files=',None,'MIME desktop files'),
('mime-file=',None,'MIME file'),
('shared-mime-file=',None,'shared MIME file'),
('setup-env-vars=',None,'environment variables passed to setup.py'),
('udev-rules=',None,'file with rules to install to udev'),
]
stdeb_cmd_bool_opts = [
'patch-already-applied',
'remove-expanded-source-dir',
'patch-posix',
'ignore-install-requires',
'no-backwards-compatibility',
'force-x-python3-version',
'allow-virtualenv-install-location',
'sign-results',
]
class NotGiven: pass
def process_command(args, cwd=None):
if not isinstance(args, (list, tuple)):
raise RuntimeError("args passed must be in a list")
check_call(args, cwd=cwd)
def recursive_hardlink(src,dst):
dst = os.path.abspath(dst)
orig_dir = os.path.abspath(os.curdir)
os.chdir(src)
try:
for root,dirs,files in os.walk(os.curdir):
for file in files:
fullpath = os.path.normpath(os.path.join(root,file))
dirname, fname = os.path.split(fullpath)
dstdir = os.path.normpath(os.path.join(dst,dirname))
if not os.path.exists(dstdir):
os.makedirs(dstdir)
newpath = os.path.join(dstdir,fname)
if os.path.exists(newpath):
if os.path.samefile(fullpath,newpath):
continue
else:
os.unlink(newpath)
#print 'linking %s -> %s'%(fullpath,newpath)
link_func(fullpath,newpath)
finally:
os.chdir(orig_dir)
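# Usage sketch for recursive_hardlink (paths are hypothetical): mirror every
# file under src into dst, creating directories as needed and hard-linking
# (or copying, where os.link is unavailable) each file.
#
#   recursive_hardlink('foo-1.0', 'deb_dist/foo-1.0')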
def debianize_name(name):
"make name acceptable as a Debian (binary) package name"
name = name.replace('_','-')
name = name.lower()
return name
def source_debianize_name(name):
"make name acceptable as a Debian source package name"
name = name.replace('_','-')
name = name.replace('.','-')
name = name.lower()
return name
def debianize_version(name):
"make name acceptable as a Debian package name"
# XXX should use setuptools' version sorting and do this properly:
name = name.replace('.dev','~dev')
name = name.lower()
return name
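# Examples of the name/version mangling above (values are illustrative):
#   debianize_name('My_Package')            -> 'my-package'
#   source_debianize_name('My_Package.ext') -> 'my-package-ext'
#   debianize_version('1.2.dev3')           -> '1.2~dev3'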
def dpkg_compare_versions(v1,op,v2):
args = ['/usr/bin/dpkg','--compare-versions',v1,op,v2]
cmd = subprocess.Popen(args)
returncode = cmd.wait()
if returncode:
return False
else:
return True
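# Usage sketch; this shells out to /usr/bin/dpkg, so it only works where dpkg
# is installed (the same assumption the function itself makes):
#   dpkg_compare_versions('7.4.3', 'ge', '7')   -> True
#   dpkg_compare_versions('0.9', 'ge', '1.0')   -> False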
def get_cmd_stdout(args):
cmd = subprocess.Popen(args,stdout=subprocess.PIPE)
returncode = cmd.wait()
if returncode:
log.error('ERROR running: %s', ' '.join(args))
raise RuntimeError('returncode %d', returncode)
return cmd.stdout.read()
def normstr(s):
try:
# Python 3.x
result = str(s,'utf-8')
except TypeError:
# Python 2.x
result = s
return result
def get_date_822():
"""return output of 822-date command"""
cmd = '/bin/date'
if not os.path.exists(cmd):
raise ValueError('%s command does not exist.'%cmd)
args = [cmd,'-R']
result = get_cmd_stdout(args).strip()
result = normstr(result)
return result
def get_version_str(pkg):
args = ['/usr/bin/dpkg-query','--show',
'--showformat=${Version}',pkg]
stdout = get_cmd_stdout(args)
return stdout.strip().decode('ascii')
def load_module(name,fname):
import imp
suffix = '.py'
found = False
for description in imp.get_suffixes():
if description[0]==suffix:
found = True
break
assert found
fd = open(fname,mode='r')
try:
module = imp.load_module(name,fd,fname,description)
finally:
fd.close()
return module
def get_deb_depends_from_setuptools_requires(requirements, on_failure="warn"):
"""
Suppose you can't confidently figure out a .deb which satisfies a given
requirement. If on_failure == 'warn', then log a warning. If on_failure
== 'raise' then raise CantSatisfyRequirement exception. If on_failure ==
'guess' then guess that python-$FOO will satisfy the dependency and that
the Python version numbers will apply to the Debian packages (in addition
to logging a warning message).
"""
assert on_failure in ("raise", "warn", "guess"), on_failure
import pkg_resources
depends = [] # This will be the return value from this function.
parsed_reqs=[]
for extra,reqs in pkg_resources.split_sections(requirements):
if extra: continue
parsed_reqs.extend(pkg_resources.parse_requirements(reqs))
if not parsed_reqs:
return depends
if not os.path.exists('/usr/bin/apt-file'):
raise ValueError('apt-file not in /usr/bin. Please install '
'with: sudo apt-get install apt-file')
# Ask apt-file for any packages which have a .egg-info file by
# these names.
# Note that apt-file appears to think that some packages
# e.g. setuptools itself have "foo.egg-info/BLAH" files but not a
# "foo.egg-info" directory.
egginfore=("(/(%s)(?:-[^/]+)?(?:-py[0-9]\.[0-9.]+)?\.egg-info)"
% '|'.join(req.project_name.replace('-', '_') for req in parsed_reqs))
args = ["apt-file", "search", "--ignore-case", "--regexp", egginfore]
if 1:
# do dry run on apt-file
dry_run_args = args[:] + ['--dummy','--non-interactive']
cmd = subprocess.Popen(dry_run_args,stderr=subprocess.PIPE)
returncode = cmd.wait()
if returncode:
err_output = cmd.stderr.read()
raise RuntimeError('Error running "apt-file search": ' +
err_output.strip())
try:
cmd = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
except Exception as le:
# TODO: catch rc=1 and "E: The cache directory is empty. You need to
# run 'apt-file update' first.", and tell the user to follow those
# instructions.
log.error('ERROR running: %s', ' '.join(args))
raise RuntimeError('exception %s from subprocess %s' % (le,args))
returncode = cmd.wait()
if returncode:
log.error('ERROR running: %s', ' '.join(args))
raise RuntimeError('returncode %d from subprocess %s' % (returncode,
args))
inlines = cmd.stdout.readlines()
dd = {} # {pydistname: {pydist: set(debpackagename)}}
E=re.compile(egginfore, re.I)
D=re.compile("^([^:]*):", re.I)
eggsndebs = set()
for l in inlines:
if l:
emo = E.search(l)
assert emo, l
dmo = D.search(l)
assert dmo, l
eggsndebs.add((emo.group(1), dmo.group(1)))
for (egginfo, debname) in eggsndebs:
pydist = pkg_resources.Distribution.from_filename(egginfo)
try:
dd.setdefault(
pydist.project_name.lower(), {}).setdefault(
pydist, set()).add(debname)
except ValueError as le:
log.warn("I got an error parsing a .egg-info file named \"%s\" "
"from Debian package \"%s\" as a pkg_resources "
"Distribution: %s" % (egginfo, debname, le,))
pass
# Now for each requirement, see if a Debian package satisfies it.
ops = {'<':'<<','>':'>>','==':'=','<=':'<=','>=':'>='}
for req in parsed_reqs:
reqname = req.project_name.lower()
gooddebs = set()
        for pydist, debs in dd.get(reqname, {}).items():
if pydist in req:
## log.info("I found Debian packages \"%s\" which provides "
## "Python package \"%s\", version \"%s\", which "
## "satisfies our version requirements: \"%s\""
## % (', '.join(debs), req.project_name, ver, req)
gooddebs |= (debs)
else:
log.info("I found Debian packages \"%s\" which provides "
"Python package \"%s\" which "
"does not satisfy our version requirements: "
"\"%s\" -- ignoring."
% (', '.join(debs), req.project_name, req))
if not gooddebs:
if on_failure == 'warn':
log.warn(
"I found no Debian package which provides the required "
"Python package \"%s\" with version requirements "
"\"%s\"."% (req.project_name, req.specs))
elif on_failure == "raise":
raise CantSatisfyRequirement(
"I found no Debian package which "
"provides the required Python package \"%s\" with version "
"requirements \"%s\"." % (req.project_name, req.specs), req)
elif on_failure == "guess":
log.warn("I found no Debian package which provides the "
"required Python package \"%s\" with version "
"requirements \"%s\". Guessing blindly that the "
"name \"python-%s\" will be it, and that the Python "
"package version number requirements will apply to "
"the Debian package." % (req.project_name,
req.specs, reqname))
gooddebs.add("python-" + reqname)
elif len(gooddebs) == 1:
log.info("I found a Debian package which provides the require "
"Python package. Python package: \"%s\", "
"Debian package: \"%s\"; adding Depends specifications "
"for the following version(s): \"%s\""
% (req.project_name, tuple(gooddebs)[0], req.specs))
else:
log.warn("I found multiple Debian packages which provide the "
"Python distribution required. I'm listing them all "
"as alternates. Candidate debs which claim to provide "
"the Python package \"%s\" are: \"%s\""
% (req.project_name, ', '.join(gooddebs),))
alts = []
for deb in gooddebs:
added_any_alt = False
for spec in req.specs:
# Here we blithely assume that the Debian package
# versions are enough like the Python package versions
# that the requirement can be ported straight over...
alts.append("%s (%s %s)" % (deb, ops[spec[0]], spec[1]))
added_any_alt = True
if not added_any_alt:
# No alternates were added, but we have the name of a
# good package.
alts.append("%s"%deb)
if len(alts):
depends.append(' | '.join(alts))
return depends
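# Hedged usage sketch for get_deb_depends_from_setuptools_requires. It needs
# apt-file with an up-to-date cache, and the result depends on the local apt
# index, so the output below is only indicative:
#   get_deb_depends_from_setuptools_requires(['numpy>=1.3'], on_failure='guess')
#   # -> e.g. ['python-numpy (>= 1.3)'], either from a matching .egg-info found
#   #    by apt-file or from the blind "python-<name>" guess described above.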
def make_tarball(tarball_fname,directory,cwd=None):
"create a tarball from a directory"
if tarball_fname.endswith('.gz'): opts = 'czf'
else: opts = 'cf'
args = ['/bin/tar',opts,tarball_fname,directory]
process_command(args, cwd=cwd)
def expand_tarball(tarball_fname,cwd=None):
"expand a tarball"
if tarball_fname.endswith('.gz'): opts = 'xzf'
elif tarball_fname.endswith('.bz2'): opts = 'xjf'
else: opts = 'xf'
args = ['/bin/tar',opts,tarball_fname]
process_command(args, cwd=cwd)
def expand_zip(zip_fname,cwd=None):
"expand a zip"
unzip_path = '/usr/bin/unzip'
if not os.path.exists(unzip_path):
log.error('ERROR: {} does not exist'.format(unzip_path))
sys.exit(1)
args = [unzip_path, zip_fname]
# Does it have a top dir
res = subprocess.Popen(
[args[0], '-l', args[1]], cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
contents = []
for line in res.stdout.readlines()[3:-2]:
contents.append(line.split()[-1])
commonprefix = os.path.commonprefix(contents)
if not commonprefix:
extdir = os.path.join(cwd, os.path.basename(zip_fname[:-4]))
args.extend(['-d', os.path.abspath(extdir)])
process_command(args, cwd=cwd)
def expand_sdist_file(sdist_file,cwd=None):
lower_sdist_file = sdist_file.lower()
if lower_sdist_file.endswith('.zip'):
expand_zip(sdist_file,cwd=cwd)
elif lower_sdist_file.endswith('.tar.bz2'):
expand_tarball(sdist_file,cwd=cwd)
elif lower_sdist_file.endswith('.tar.gz'):
expand_tarball(sdist_file,cwd=cwd)
else:
raise RuntimeError('could not guess format of original sdist file')
def repack_tarball_with_debianized_dirname( orig_sdist_file,
repacked_sdist_file,
debianized_dirname,
original_dirname ):
working_dir = tempfile.mkdtemp()
expand_sdist_file( orig_sdist_file, cwd=working_dir )
fullpath_original_dirname = os.path.join(working_dir,original_dirname)
fullpath_debianized_dirname = os.path.join(working_dir,debianized_dirname)
# ensure sdist looks like sdist:
assert os.path.exists( fullpath_original_dirname )
assert len(os.listdir(working_dir))==1
if fullpath_original_dirname != fullpath_debianized_dirname:
# rename original dirname to debianized dirname
os.rename(fullpath_original_dirname,
fullpath_debianized_dirname)
make_tarball(repacked_sdist_file,debianized_dirname,cwd=working_dir)
shutil.rmtree(working_dir)
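# Sketch of a typical repack call (file names are hypothetical): the sdist is
# re-rolled so its top-level directory matches the name Debian tools expect.
#   repack_tarball_with_debianized_dirname(
#       'dist/Foo-1.0.tar.gz',           # original sdist
#       'deb_dist/foo-1.0.tar.gz',       # repacked tarball to create
#       'foo-1.0',                       # debianized top-level dirname
#       'Foo-1.0')                       # dirname currently inside the sdist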
def dpkg_buildpackage(*args,**kwargs):
    "call dpkg-buildpackage [arg1] [...] [argN]"
    cwd=kwargs.pop('cwd',None)
    if len(kwargs)!=0:
        raise ValueError('only kwarg can be "cwd"')
    args = ['/usr/bin/dpkg-buildpackage']+list(args)
    process_command(args, cwd=cwd)
def dpkg_source(b_or_x,arg1,cwd=None):
"call dpkg-source -b|x arg1 [arg2]"
assert b_or_x in ['-b','-x']
args = ['/usr/bin/dpkg-source',b_or_x,arg1]
process_command(args, cwd=cwd)
def apply_patch(patchfile,cwd=None,posix=False,level=0):
"""call 'patch -p[level] [--posix] < arg1'
posix mode is sometimes necessary. It keeps empty files so that
dpkg-source removes their contents.
"""
if not os.path.exists(patchfile):
raise RuntimeError('patchfile "%s" does not exist'%patchfile)
fd = open(patchfile,mode='r')
level_str = '-p%d'%level
args = ['/usr/bin/patch',level_str]
if posix:
args.append('--posix')
log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile)
log.info(' PATCHING in dir: %s', cwd)
# print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile
# print >> sys.stderr, ' PATCHING in dir:',cwd
res = subprocess.Popen(
args, cwd=cwd,
stdin=fd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
returncode=None
while returncode is None:
returncode = res.poll()
ready = select.select( [res.stdout,res.stderr],[],[],0.1)
# XXX figure out how to do this without reading byte-by-byte
if res.stdout in ready[0]:
sys.stdout.write(res.stdout.read(1))
sys.stdout.flush()
if res.stderr in ready[0]:
sys.stderr.write(res.stderr.read(1))
sys.stderr.flush()
# finish outputting file
sys.stdout.write(res.stdout.read())
sys.stdout.flush()
sys.stderr.write(res.stderr.read())
sys.stderr.flush()
if returncode:
log.error('ERROR running: %s', ' '.join(args))
log.error('ERROR in %s', cwd)
# print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)
# print >> sys.stderr, 'ERROR in',cwd
raise RuntimeError('returncode %d'%returncode)
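# Usage sketch (patch file and directory are hypothetical): runs
# '/usr/bin/patch -p1 < fix-build.patch' inside the unpacked source tree,
# streaming the patch program's stdout/stderr as shown above.
#   apply_patch('fix-build.patch', cwd='deb_dist/foo-1.0', level=1)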
def parse_vals(cfg,section,option):
"""parse comma separated values in debian control file style from .cfg"""
try:
vals = cfg.get(section,option)
except ConfigParser.NoSectionError as err:
if section != 'DEFAULT':
vals = cfg.get('DEFAULT',option)
else:
raise err
vals = vals.split('#')[0]
vals = vals.strip()
vals = vals.split(',')
vals = [v.strip() for v in vals]
vals = [v for v in vals if len(v)]
return vals
def parse_val(cfg,section,option):
"""extract a single value from .cfg"""
vals = parse_vals(cfg,section,option)
if len(vals)==0:
return ''
else:
assert len(vals)==1, (section, option, vals, type(vals))
return vals[0]
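# Example of the parsing rules above, assuming a config section such as
#   [mypkg]
#   Depends: python-numpy, python-lxml
# then:
#   parse_vals(cfg, 'mypkg', 'Depends') -> ['python-numpy', 'python-lxml']
# parse_val() is the single-value variant: it returns '' for an empty option
# and asserts if more than one comma-separated value is present.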
def apt_cache_info(apt_cache_cmd,package_name):
if apt_cache_cmd not in ('showsrc','show'):
raise NotImplementedError(
"don't know how to run apt-cache command '%s'"%apt_cache_cmd)
result_list = []
args = ["apt-cache", apt_cache_cmd, package_name]
cmd = subprocess.Popen(args,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE)
returncode = cmd.wait()
if returncode:
errline = cmd.stderr.read()
if not (returncode == 100 and errline == "E: You must put some 'source' URIs in your sources.list\n"):
log.error('ERROR running: %s', ' '.join(args))
raise RuntimeError('returncode %d from subprocess %s' % (returncode,
args))
inlines = cmd.stdout.read()
version_blocks = inlines.split('\n\n')
for version_block in version_blocks:
block_dict = {}
if len(version_block)==0:
continue
version_lines = version_block.split('\n')
assert version_lines[0].startswith('Package: ')
block_dict['Package'] = version_lines[0][ len('Package: '): ]
if apt_cache_cmd == 'showsrc':
assert version_lines[1].startswith('Binary: ')
block_dict['Binary'] = version_lines[1][ len('Binary: '): ]
block_dict['Binary'] = block_dict['Binary'].split(', ')
elif apt_cache_cmd == 'show':
for start in ('Provides: ','Conflicts: ','Replaces: '):
key = start[:-2]
for line in version_lines[2:]:
if line.startswith(start):
unsplit_line_result = line[ len(start): ]
split_result = unsplit_line_result.split(', ')
block_dict[key] = split_result
if key not in block_dict:
block_dict[key] = []
result_list.append(block_dict)
return result_list
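# Hedged usage sketch; the return value depends entirely on the local apt
# cache, so the shapes below are only indicative:
#   apt_cache_info('show', 'python-numpy')
#   # -> [{'Package': 'python-numpy', 'Provides': [...], 'Conflicts': [...],
#   #      'Replaces': [...]}, ...]        (one dict per "Package:" block)
#   apt_cache_info('showsrc', 'numpy')
#   # -> [{'Package': 'numpy', 'Binary': ['python-numpy', ...]}, ...]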
def check_cfg_files(cfg_files,module_name):
"""check if the configuration files actually specify something
If config files are given, give warning if they don't contain
    information. This may indicate a wrong module name, for
example.
"""
cfg = ConfigParser.SafeConfigParser()
cfg.read(cfg_files)
if cfg.has_section(module_name):
section_items = cfg.items(module_name)
else:
section_items = []
default_items = cfg.items('DEFAULT')
n_items = len(section_items) + len(default_items)
if n_items==0:
log.warn('configuration files were specified, but no options were '
'found in "%s" or "DEFAULT" sections.' % (module_name,) )
class DebianInfo:
"""encapsulate information for Debian distribution system"""
def __init__(self,
cfg_files=NotGiven,
module_name=NotGiven,
default_distribution=NotGiven,
guess_maintainer=NotGiven,
upstream_version=NotGiven,
has_ext_modules=NotGiven,
description=NotGiven,
long_description=NotGiven,
patch_file=None,
patch_level=None,
setup_requires=None,
debian_version=None,
have_script_entry_points = None,
use_setuptools = False,
guess_conflicts_provides_replaces = False,
sdist_dsc_command = None,
with_python2 = None,
with_python3 = None,
no_python2_scripts = None,
no_python3_scripts = None,
force_x_python3_version=False,
allow_virtualenv_install_location=False,
):
if cfg_files is NotGiven: raise ValueError("cfg_files must be supplied")
if module_name is NotGiven: raise ValueError(
"module_name must be supplied")
if default_distribution is NotGiven: raise ValueError(
"default_distribution must be supplied")
if guess_maintainer is NotGiven: raise ValueError(
"guess_maintainer must be supplied")
if upstream_version is NotGiven: raise ValueError(
"upstream_version must be supplied")
if has_ext_modules is NotGiven: raise ValueError(
"has_ext_modules must be supplied")
if description is NotGiven: raise ValueError(
"description must be supplied")
if long_description is NotGiven: raise ValueError(
"long_description must be supplied")
cfg_defaults = self._make_cfg_defaults(
module_name=module_name,
default_distribution=default_distribution,
guess_maintainer=guess_maintainer,
)
if len(cfg_files):
check_cfg_files(cfg_files,module_name)
cfg = ConfigParser.SafeConfigParser(cfg_defaults)
for cfg_file in cfg_files:
with codecs.open( cfg_file, mode='r', encoding='utf-8') as fd:
cfg.readfp(fd)
if sdist_dsc_command is not None:
# Allow distutils commands to override config files (this lets
# command line options beat file options).
for longopt, shortopt, desc in stdeb_cfg_options:
opt_name = longopt[:-1]
name = opt_name.replace('-','_')
value = getattr( sdist_dsc_command, name )
if value is not None:
if not cfg.has_section(module_name):
cfg.add_section(module_name)
cfg.set( module_name, opt_name, value )
self.stdeb_version = __stdeb_version__
self.module_name = module_name
self.source = parse_val(cfg,module_name,'Source')
self.package = parse_val(cfg,module_name,'Package')
self.package3 = parse_val(cfg,module_name,'Package3')
forced_upstream_version = parse_val(cfg,module_name,
'Forced-Upstream-Version')
if forced_upstream_version == '':
upstream_version_prefix = parse_val(cfg,module_name,
'Upstream-Version-Prefix')
upstream_version_suffix = parse_val(cfg,module_name,
'Upstream-Version-Suffix')
self.upstream_version = (upstream_version_prefix+
debianize_version(upstream_version)+
upstream_version_suffix)
else:
if (debianize_version(forced_upstream_version) !=
forced_upstream_version):
raise ValueError('forced upstream version ("%s") not a '
'Debian-compatible version (e.g. "%s")'%(
forced_upstream_version,
debianize_version(forced_upstream_version)))
self.upstream_version = forced_upstream_version
self.epoch = parse_val(cfg,module_name,'Epoch')
if self.epoch != '' and not self.epoch.endswith(':'):
self.epoch = self.epoch + ':'
self.packaging_version = parse_val(cfg,module_name,'Debian-Version')
if debian_version is not None:
# command-line arg overrides file
self.packaging_version = debian_version
self.dsc_version = '%s-%s'%(
self.upstream_version,
self.packaging_version)
self.full_version = '%s%s-%s'%(
self.epoch,
self.upstream_version,
self.packaging_version)
self.distname = parse_val(cfg,module_name,'Suite')
self.maintainer = ', '.join(parse_vals(cfg,module_name,'Maintainer'))
self.uploaders = parse_vals(cfg,module_name,'Uploaders')
self.date822 = get_date_822()
build_deps = []
if use_setuptools:
if with_python2:
build_deps.append('python-setuptools (>= 0.6b3)')
if with_python3:
build_deps.append('python3-setuptools')
if setup_requires is not None and len(setup_requires):
build_deps.extend(
get_deb_depends_from_setuptools_requires(setup_requires))
depends = ['${misc:Depends}', '${python:Depends}']
depends3 = ['${misc:Depends}', '${python3:Depends}']
need_custom_binary_target = False
if has_ext_modules:
self.architecture = 'any'
if with_python2:
build_deps.append('python-all-dev (>= %s)'%PYTHON_ALL_MIN_VERS)
depends.append('${shlibs:Depends}')
self.architecture3 = 'any'
if with_python3:
build_deps.append('python3-all-dev')
depends3.append('${shlibs:Depends}')
else:
self.architecture = 'all'
if with_python2:
build_deps.append('python-all (>= %s)'%PYTHON_ALL_MIN_VERS)
self.architecture3 = 'all'
if with_python3:
build_deps.append('python3-all')
self.copyright_file = parse_val(cfg,module_name,'Copyright-File')
self.mime_file = parse_val(cfg,module_name,'MIME-File')
self.shared_mime_file = parse_val(cfg,module_name,'Shared-MIME-File')
if self.mime_file == '' and self.shared_mime_file == '':
self.dh_installmime_indep_line = ''
else:
need_custom_binary_target = True
self.dh_installmime_indep_line = '\tdh_installmime'
mime_desktop_files = parse_vals(cfg,module_name,'MIME-Desktop-Files')
if len(mime_desktop_files):
need_custom_binary_target = True
self.dh_desktop_indep_line = '\tdh_desktop'
else:
self.dh_desktop_indep_line = ''
# E. any mime .desktop files
self.install_file_lines = []
for mime_desktop_file in mime_desktop_files:
self.install_file_lines.append(
'%s usr/share/applications'%mime_desktop_file)
depends.extend(parse_vals(cfg,module_name,'Depends') )
depends3.extend(parse_vals(cfg,module_name,'Depends3') )
self.depends = ', '.join(depends)
self.depends3 = ', '.join(depends3)
self.debian_section = parse_val(cfg,module_name,'Section')
self.description = re.sub('\s+', ' ', description).strip()
if long_description != 'UNKNOWN':
ld2=[]
for line in long_description.split('\n'):
ls = line.strip()
if len(ls):
ld2.append(' '+line)
else:
ld2.append(' .')
ld2 = ld2[:20]
self.long_description = '\n'.join(ld2)
else:
self.long_description = ''
if have_script_entry_points:
build_deps.append( 'debhelper (>= %s)'%DH_IDEAL_VERS )
else:
build_deps.append( 'debhelper (>= %s)'%DH_MIN_VERS )
build_deps.extend( parse_vals(cfg,module_name,'Build-Depends') )
self.build_depends = ', '.join(build_deps)
suggests = ', '.join( parse_vals(cfg,module_name,'Suggests') )
suggests3 = ', '.join( parse_vals(cfg,module_name,'Suggests3') )
recommends = ', '.join( parse_vals(cfg,module_name,'Recommends') )
recommends3 = ', '.join( parse_vals(cfg,module_name,'Recommends3') )
self.source_stanza_extras = ''
build_conflicts = parse_vals(cfg,module_name,'Build-Conflicts')
if len(build_conflicts):
self.source_stanza_extras += ('Build-Conflicts: '+
', '.join( build_conflicts )+'\n')
self.patch_file = parse_val(cfg,module_name,'Stdeb-Patch-File')
if patch_file is not None:
if self.patch_file != '':
raise RuntimeError('A patch file was specified on the command '
'line and in .cfg file.')
else:
self.patch_file = patch_file
self.patch_level = parse_val(cfg,module_name,'Stdeb-Patch-Level')
if self.patch_level != '':
if patch_level is not None:
raise RuntimeError('A patch level was specified on the command '
'line and in .cfg file.')
else:
self.patch_level = int(self.patch_level)
else:
if patch_level is not None:
self.patch_level = patch_level
else:
self.patch_level = 0
xs_python_version = parse_vals(cfg,module_name,'XS-Python-Version')
if len(xs_python_version)!=0:
self.source_stanza_extras += ('X-Python-Version: '+
', '.join(xs_python_version)+'\n')
x_python3_version = parse_vals(cfg,module_name,'X-Python3-Version')
x_python3_version = [v.strip('"') for v in x_python3_version]
if len(x_python3_version)!=0:
self.source_stanza_extras += ('X-Python3-Version: '+
', '.join(x_python3_version)+'\n')
dpkg_shlibdeps_params = parse_val(
cfg,module_name,'dpkg-shlibdeps-params')
if dpkg_shlibdeps_params:
need_custom_binary_target = True
self.dh_binary_arch_lines = """\tdh binary-arch --before dh_shlibdeps
\tdh_shlibdeps -a --dpkg-shlibdeps-params=%s
\tdh binary --after dh_shlibdeps"""%dpkg_shlibdeps_params
else:
self.dh_binary_arch_lines = '\tdh binary-arch'
self.dh_binary_indep_lines = '\tdh binary-indep'
conflicts = parse_vals(cfg,module_name,'Conflicts')
conflicts3 = parse_vals(cfg,module_name,'Conflicts3')
provides = parse_vals(cfg,module_name,'Provides')
provides3 = parse_vals(cfg,module_name,'Provides3')
replaces = parse_vals(cfg,module_name,'Replaces')
replaces3 = parse_vals(cfg,module_name,'Replaces3')
if guess_conflicts_provides_replaces:
# Find list of binaries which we will conflict/provide/replace.
cpr_binaries = set()
# Get original Debian information for the package named the same.
for version_info in apt_cache_info('showsrc',self.package):
# Remember each of the binary packages produced by the Debian source
for binary in version_info['Binary']:
cpr_binaries.add(binary)
# TODO: do this for every version available , just the
# first, or ???
break
# Descend each of the original binaries and see what
# packages they conflict/ provide/ replace:
for orig_binary in cpr_binaries:
for version_info in apt_cache_info('show',orig_binary):
provides.extend( version_info['Provides'])
provides3.extend( version_info['Provides'])
conflicts.extend(version_info['Conflicts'])
conflicts3.extend(version_info['Conflicts'])
replaces.extend( version_info['Replaces'])
replaces3.extend( version_info['Replaces'])
if self.package in cpr_binaries:
cpr_binaries.remove(self.package) # don't include ourself
cpr_binaries = list(cpr_binaries) # convert to list
conflicts.extend( cpr_binaries )
conflicts3.extend( cpr_binaries )
provides.extend( cpr_binaries )
provides3.extend( cpr_binaries )
replaces.extend( cpr_binaries )
replaces3.extend( cpr_binaries )
# round-trip through set to get unique entries
conflicts = list(set(conflicts))
conflicts3 = list(set(conflicts3))
provides = list(set(provides))
provides3 = list(set(provides3))
replaces = list(set(replaces))
replaces3 = list(set(replaces3))
self.package_stanza_extras = ''
self.package_stanza_extras3 = ''
if len(conflicts):
self.package_stanza_extras += ('Conflicts: '+
', '.join( conflicts )+'\n')
if len(conflicts3):
self.package_stanza_extras3 += ('Conflicts: '+
', '.join( conflicts3 )+'\n')
if len(provides):
self.package_stanza_extras += ('Provides: '+
', '.join( provides )+'\n')
if len(provides3):
self.package_stanza_extras3 += ('Provides: '+
', '.join( provides3 )+'\n')
if len(replaces):
self.package_stanza_extras += ('Replaces: ' +
', '.join( replaces )+'\n')
if len(replaces3):
self.package_stanza_extras3 += ('Replaces: ' +
', '.join( replaces3 )+'\n')
if len(recommends):
self.package_stanza_extras += ('Recommends: '+recommends+'\n')
if len(recommends3):
self.package_stanza_extras3 += ('Recommends: '+recommends3+'\n')
if len(suggests):
self.package_stanza_extras += ('Suggests: '+suggests+'\n')
if len(suggests3):
self.package_stanza_extras3 += ('Suggests: '+suggests3+'\n')
self.dirlist = ""
if not (with_python2 or with_python3):
            raise RuntimeError('nothing to do - neither Python 2 nor 3.')
sequencer_with = []
if with_python2:
sequencer_with.append('python2')
if with_python3:
sequencer_with.append('python3')
num_binary_packages = len(sequencer_with)
no_script_lines=[]
if no_python2_scripts:
# install to a location where debian tools do not find them
self.no_python2_scripts_cli_args = '--install-scripts=/trash'
no_script_lines.append(
'rm -rf debian/%s/trash'%(self.package,))
else:
self.no_python2_scripts_cli_args = ''
if no_python3_scripts:
# install to a location where debian tools do not find them
self.no_python3_scripts_cli_args = '--install-scripts=/trash'
no_script_lines.append(
'rm -rf debian/%s/trash'%(self.package3,))
else:
self.no_python3_scripts_cli_args = ''
self.scripts_cleanup = '\n'.join([' '+s for s in no_script_lines])
if sys.prefix != '/usr':
if not allow_virtualenv_install_location:
# virtualenv will set distutils
# --prefix=/path/to/virtualenv, but unless explicitly
# requested, we want to install into /usr.
self.install_prefix = '--prefix=/usr'
else:
self.install_prefix = '--prefix=%s' % sys.prefix
else:
self.install_prefix = ''
rules_override_clean_target_pythons = []
rules_override_build_target_pythons = []
rules_override_install_target_pythons = []
if with_python2:
rules_override_clean_target_pythons.append(
RULES_OVERRIDE_CLEAN_TARGET_PY2%self.__dict__
)
rules_override_build_target_pythons.append(
RULES_OVERRIDE_BUILD_TARGET_PY2%self.__dict__
)
rules_override_install_target_pythons.append(
RULES_OVERRIDE_INSTALL_TARGET_PY2%self.__dict__
)
if with_python3:
rules_override_clean_target_pythons.append(
RULES_OVERRIDE_CLEAN_TARGET_PY3%self.__dict__
)
rules_override_build_target_pythons.append(
RULES_OVERRIDE_BUILD_TARGET_PY3%self.__dict__
)
rules_override_install_target_pythons.append(
RULES_OVERRIDE_INSTALL_TARGET_PY3%self.__dict__
)
self.rules_override_clean_target_pythons = '\n'.join(rules_override_clean_target_pythons)
self.rules_override_build_target_pythons = '\n'.join(rules_override_build_target_pythons)
self.rules_override_install_target_pythons = '\n'.join(rules_override_install_target_pythons)
self.override_dh_auto_clean = RULES_OVERRIDE_CLEAN_TARGET%self.__dict__
self.override_dh_auto_build = RULES_OVERRIDE_BUILD_TARGET%self.__dict__
self.override_dh_auto_install = RULES_OVERRIDE_INSTALL_TARGET%self.__dict__
if force_x_python3_version and with_python3 and x_python3_version and \
x_python3_version[0]:
# override dh_python3 target to modify the dependencies
            # to ensure that the passed minimum X-Python3-Version is used
version = x_python3_version[0]
if not version.endswith('~'):
version += '~'
self.override_dh_python3 = RULES_OVERRIDE_PYTHON3%{
'scripts': (
' sed -i ' +
'"s/\([ =]python3:any (\)>= [^)]*\()\)/\\1%s\\2/g" ' +
'debian/%s.substvars') % (version, self.package3)
}
else:
self.override_dh_python3 = ''
sequencer_options = ['--with '+','.join(sequencer_with)]
sequencer_options.append('--buildsystem=python_distutils')
self.sequencer_options = ' '.join(sequencer_options)
setup_env_vars = parse_vals(cfg,module_name,'Setup-Env-Vars')
self.exports = ""
if len(setup_env_vars):
self.exports += '\n'
self.exports += '#exports specified using stdeb Setup-Env-Vars:\n'
self.exports += '\n'.join(['export %s'%v for v in setup_env_vars])
self.exports += '\n'
self.udev_rules = parse_val(cfg,module_name,'Udev-Rules')
if need_custom_binary_target:
if self.architecture == 'all':
self.binary_target_lines = ( \
RULES_BINARY_ALL_TARGET%self.__dict__ + \
RULES_BINARY_INDEP_TARGET%self.__dict__ )
else:
self.binary_target_lines = ( \
RULES_BINARY_TARGET%self.__dict__ + \
RULES_BINARY_INDEP_TARGET%self.__dict__ + \
RULES_BINARY_ARCH_TARGET%self.__dict__ )
else:
self.binary_target_lines = ''
if with_python2:
self.control_py2_stanza = CONTROL_PY2_STANZA%self.__dict__
else:
self.control_py2_stanza = ''
if with_python3:
self.control_py3_stanza = CONTROL_PY3_STANZA%self.__dict__
else:
self.control_py3_stanza = ''
self.with_python2 = with_python2
self.with_python3 = with_python3
self.no_python2_scripts = no_python2_scripts
self.no_python3_scripts = no_python3_scripts
def _make_cfg_defaults(self,
module_name=NotGiven,
default_distribution=NotGiven,
guess_maintainer=NotGiven,
):
defaults = {}
default_re = re.compile(r'^.* \(Default: (.*)\)$')
for longopt,shortopt,description in stdeb_cfg_options:
assert longopt.endswith('=')
assert longopt.lower() == longopt
key = longopt[:-1]
matchobj = default_re.search( description )
if matchobj is not None:
# has a default value
groups = matchobj.groups()
assert len(groups)==1
value = groups[0]
# A few special cases
if value == '<source-debianized-setup-name>':
assert key=='source'
value = source_debianize_name(module_name)
elif value == 'python-<debianized-setup-name>':
assert key=='package'
value = 'python-' + debianize_name(module_name)
elif value == 'python3-<debianized-setup-name>':
assert key=='package3'
value = 'python3-' + debianize_name(module_name)
elif value == '<setup-maintainer-or-author>':
assert key=='maintainer'
value = guess_maintainer
if key=='suite':
if default_distribution is not None:
value = default_distribution
log.warn('Deprecation warning: you are using the '
'--default-distribution option. '
'Switch to the --suite option.')
else:
# no default value
value = ''
defaults[key] = value
return defaults
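# Illustration of how _make_cfg_defaults harvests defaults: any entry in
# stdeb_cfg_options whose help text ends in '(Default: X)' seeds the
# ConfigParser defaults with X, e.g.
#   'debian version (Default: 1)'  -> defaults['debian-version'] = '1'
#   '... (Default: unstable)'      -> defaults['suite'] = 'unstable'
# while options without such a suffix default to the empty string.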
def build_dsc(debinfo,
dist_dir,
repackaged_dirname,
orig_sdist=None,
patch_posix=0,
remove_expanded_source_dir=0,
debian_dir_only=False,
sign_dsc=False,
):
"""make debian source package"""
# A. Find new dirname and delete any pre-existing contents
# dist_dir is usually 'deb_dist'
# the location of the copied original source package (it was
# re-recreated in dist_dir)
if debian_dir_only:
fullpath_repackaged_dirname = os.path.abspath(os.curdir)
else:
fullpath_repackaged_dirname = os.path.join(dist_dir,repackaged_dirname)
###############################################
# 1. make temporary original source tarball
# Note that, for the final tarball, best practices suggest
# using "dpkg-source -b". See
# http://www.debian.org/doc/developers-reference/ch-best-pkging-practices.en.html
# Create the name of the tarball that qualifies as the upstream
# source. If the original was specified, we'll link to
# it. Otherwise, we generate our own .tar.gz file from the output
# of "python setup.py sdist" (done above) so that we avoid
# packaging .svn directories, for example.
if not debian_dir_only:
repackaged_orig_tarball = ('%(source)s_%(upstream_version)s.orig.tar.gz'%
debinfo.__dict__)
repackaged_orig_tarball_path = os.path.join(dist_dir,
repackaged_orig_tarball)
if orig_sdist is not None:
if os.path.exists(repackaged_orig_tarball_path):
os.unlink(repackaged_orig_tarball_path)
link_func(orig_sdist,repackaged_orig_tarball_path)
else:
make_tarball(repackaged_orig_tarball,
repackaged_dirname,
cwd=dist_dir)
# apply patch
if debinfo.patch_file != '':
apply_patch(debinfo.patch_file,
posix=patch_posix,
level=debinfo.patch_level,
cwd=fullpath_repackaged_dirname)
for fname in ['Makefile','makefile']:
if os.path.exists(os.path.join(fullpath_repackaged_dirname,fname)):
sys.stderr.write('*'*1000 + '\n')
sys.stderr.write('WARNING: a Makefile exists in this package. '
'stdeb will tell debhelper 7 to use setup.py '
'to build and install the package, and the '
'Makefile will be ignored.\n')
sys.stderr.write('*'*1000 + '\n')
###############################################
# 2. create debian/ directory and contents
debian_dir = os.path.join(fullpath_repackaged_dirname,'debian')
if not os.path.exists(debian_dir):
os.mkdir(debian_dir)
# A. debian/changelog
changelog = CHANGELOG_FILE%debinfo.__dict__
with codecs.open( os.path.join(debian_dir,'changelog'),
mode='w', encoding='utf-8') as fd:
fd.write(changelog)
# B. debian/control
if debinfo.uploaders:
debinfo.uploaders = 'Uploaders: %s\n' % ', '.join(debinfo.uploaders)
else:
debinfo.uploaders = ''
control = CONTROL_FILE%debinfo.__dict__
with codecs.open( os.path.join(debian_dir,'control'),
mode='w', encoding='utf-8') as fd:
fd.write(control)
# C. debian/rules
debinfo.percent_symbol = '%'
rules = RULES_MAIN%debinfo.__dict__
rules = rules.replace(' ','\t')
rules_fname = os.path.join(debian_dir,'rules')
with codecs.open( rules_fname,
mode='w', encoding='utf-8') as fd:
fd.write(rules)
os.chmod(rules_fname,0o755)
# D. debian/compat
fd = open( os.path.join(debian_dir,'compat'), mode='w')
fd.write('7\n')
fd.close()
# E. debian/package.mime
if debinfo.mime_file != '':
if not os.path.exists(debinfo.mime_file):
raise ValueError(
'a MIME file was specified, but does not exist: %s'%(
debinfo.mime_file,))
link_func( debinfo.mime_file,
os.path.join(debian_dir,debinfo.package+'.mime'))
if debinfo.shared_mime_file != '':
if not os.path.exists(debinfo.shared_mime_file):
raise ValueError(
'a shared MIME file was specified, but does not exist: %s'%(
debinfo.shared_mime_file,))
link_func( debinfo.shared_mime_file,
os.path.join(debian_dir,
debinfo.package+'.sharedmimeinfo'))
# F. debian/copyright
if debinfo.copyright_file != '':
link_func( debinfo.copyright_file,
os.path.join(debian_dir,'copyright'))
# H. debian/<package>.install
if len(debinfo.install_file_lines):
fd = open( os.path.join(debian_dir,'%s.install'%debinfo.package), mode='w')
fd.write('\n'.join(debinfo.install_file_lines)+'\n')
fd.close()
# I. debian/<package>.udev
if debinfo.udev_rules != '':
fname = debinfo.udev_rules
if not os.path.exists(fname):
raise ValueError('udev rules file specified, but does not exist')
link_func(fname,
os.path.join(debian_dir,'%s.udev'%debinfo.package))
# J. debian/source/format
os.mkdir(os.path.join(debian_dir,'source'))
fd = open( os.path.join(debian_dir,'source','format'), mode='w')
fd.write('3.0 (quilt)\n')
fd.close()
fd = open( os.path.join(debian_dir,'source','options'), mode='w')
    fd.write(r'extend-diff-ignore="\.egg-info$"')
fd.close()
if debian_dir_only:
return
###############################################
# 3. unpack original source tarball
debianized_package_dirname = fullpath_repackaged_dirname+'.debianized'
if os.path.exists(debianized_package_dirname):
raise RuntimeError('debianized_package_dirname exists: %s' %
debianized_package_dirname)
# A. move debianized tree away
os.rename(fullpath_repackaged_dirname, debianized_package_dirname )
if orig_sdist is not None:
# B. expand repackaged original tarball
tmp_dir = os.path.join(dist_dir,'tmp-expand')
os.mkdir(tmp_dir)
try:
expand_tarball(orig_sdist,cwd=tmp_dir)
orig_tarball_top_contents = os.listdir(tmp_dir)
# make sure original tarball has exactly one directory
assert len(orig_tarball_top_contents)==1
orig_dirname = orig_tarball_top_contents[0]
fullpath_orig_dirname = os.path.join(tmp_dir,orig_dirname)
# C. remove original repackaged tree
shutil.rmtree(fullpath_orig_dirname)
finally:
shutil.rmtree(tmp_dir)
    if True:  # sanity-check the installed debhelper and python-all versions
# check versions of debhelper and python-all
debhelper_version_str = get_version_str('debhelper')
if len(debhelper_version_str)==0:
log.warn('This version of stdeb requires debhelper >= %s, but you '
'do not have debhelper installed. '
'Could not check compatibility.'%DH_MIN_VERS)
else:
if not dpkg_compare_versions(
debhelper_version_str, 'ge', DH_MIN_VERS ):
log.warn('This version of stdeb requires debhelper >= %s. '
'Use stdeb 0.3.x to generate source packages '
'compatible with older versions of debhelper.'%(
DH_MIN_VERS,))
python_defaults_version_str = get_version_str('python-all')
if len(python_defaults_version_str)==0:
log.warn('This version of stdeb requires python-all >= %s, '
'but you do not have this package installed. '
'Could not check compatibility.'%PYTHON_ALL_MIN_VERS)
else:
if not dpkg_compare_versions(
python_defaults_version_str, 'ge', PYTHON_ALL_MIN_VERS):
log.warn('This version of stdeb requires python-all >= %s. '
'Use stdeb 0.6.0 or older to generate source packages '
'that use python-support.'%(
PYTHON_ALL_MIN_VERS,))
# D. restore debianized tree
os.rename(fullpath_repackaged_dirname+'.debianized',
fullpath_repackaged_dirname)
# Re-generate tarball using best practices see
# http://www.debian.org/doc/developers-reference/ch-best-pkging-practices.en.html
if sign_dsc:
args = ()
else:
args = ('-uc','-us')
dpkg_buildpackage('-S','-sa',*args,cwd=fullpath_repackaged_dirname)
    if True:
shutil.rmtree(fullpath_repackaged_dirname)
if not remove_expanded_source_dir:
# expand the debian source package
dsc_name = debinfo.source + '_' + debinfo.dsc_version + '.dsc'
dpkg_source('-x',dsc_name,
cwd=dist_dir)
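
# Illustrative sketch only (not part of the original module): how a caller such
# as the sdist_dsc command is expected to invoke build_dsc() once it has built a
# DebianInfo-style object.  The function name, the literal paths and the package
# name below are hypothetical and only demonstrate the calling convention.
def _example_build_dsc_call(debinfo, sdist_tarball='dist/mypkg-1.0.tar.gz'):
    return build_dsc(debinfo,
                     dist_dir='deb_dist',               # output directory for the .dsc
                     repackaged_dirname='mypkg-1.0',    # copied source tree inside dist_dir
                     orig_sdist=sdist_tarball,          # linked in as *.orig.tar.gz
                     remove_expanded_source_dir=0,
                     sign_dsc=False)                    # unsigned: -uc/-us are passed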
CHANGELOG_FILE = """\
%(source)s (%(full_version)s) %(distname)s; urgency=low
* source package automatically created by stdeb %(stdeb_version)s
-- %(maintainer)s %(date822)s\n"""
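
# Illustrative sketch only: the templates in this module are plain %-format
# strings filled from debinfo.__dict__.  The dictionary below is hypothetical
# and lists just the keys CHANGELOG_FILE needs.
def _example_fill_changelog():
    demo = {'source': 'mypkg',
            'full_version': '1.0-1',
            'distname': 'unstable',
            'stdeb_version': '0.0.0',
            'maintainer': 'Jane Doe <jane@example.com>',
            'date822': 'Mon, 01 Jan 2024 00:00:00 +0000'}
    return CHANGELOG_FILE % demo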
CONTROL_FILE = """\
Source: %(source)s
Maintainer: %(maintainer)s
%(uploaders)sSection: %(debian_section)s
Priority: optional
Build-Depends: %(build_depends)s
Standards-Version: 3.9.1
%(source_stanza_extras)s
%(control_py2_stanza)s
%(control_py3_stanza)s
"""
CONTROL_PY2_STANZA = """
Package: %(package)s
Architecture: %(architecture)s
Depends: %(depends)s
%(package_stanza_extras)sDescription: %(description)s
%(long_description)s
"""
CONTROL_PY3_STANZA = """
Package: %(package3)s
Architecture: %(architecture3)s
Depends: %(depends3)s
%(package_stanza_extras3)sDescription: %(description)s
%(long_description)s
"""
RULES_MAIN = """\
#!/usr/bin/make -f
# This file was automatically generated by stdeb %(stdeb_version)s at
# %(date822)s
%(exports)s
%(percent_symbol)s:
dh $@ %(sequencer_options)s
%(override_dh_auto_clean)s
%(override_dh_auto_build)s
%(override_dh_auto_install)s
override_dh_python2:
dh_python2 --no-guessing-versions
%(override_dh_python3)s
%(binary_target_lines)s
"""
RULES_OVERRIDE_CLEAN_TARGET_PY2 = " python setup.py clean -a"
RULES_OVERRIDE_CLEAN_TARGET_PY3 = " python3 setup.py clean -a"
RULES_OVERRIDE_CLEAN_TARGET = """
override_dh_auto_clean:
%(rules_override_clean_target_pythons)s
find . -name \*.pyc -exec rm {} \;
"""
RULES_OVERRIDE_BUILD_TARGET_PY2 = " python setup.py build --force"
RULES_OVERRIDE_BUILD_TARGET_PY3 = " python3 setup.py build --force"
RULES_OVERRIDE_BUILD_TARGET = """
override_dh_auto_build:
%(rules_override_build_target_pythons)s
"""
RULES_OVERRIDE_INSTALL_TARGET_PY2 = " python setup.py install --force --root=debian/%(package)s --no-compile -O0 --install-layout=deb %(install_prefix)s %(no_python2_scripts_cli_args)s"
RULES_OVERRIDE_INSTALL_TARGET_PY3 = " python3 setup.py install --force --root=debian/%(package3)s --no-compile -O0 --install-layout=deb %(install_prefix)s %(no_python3_scripts_cli_args)s"
RULES_OVERRIDE_INSTALL_TARGET = """
override_dh_auto_install:
%(rules_override_install_target_pythons)s
%(scripts_cleanup)s
"""
RULES_OVERRIDE_PYTHON3 = """
override_dh_python3:
dh_python3
%(scripts)s
"""
RULES_BINARY_TARGET = """
binary: binary-arch binary-indep
"""
RULES_BINARY_ALL_TARGET = """
binary: binary-indep
"""
RULES_BINARY_ARCH_TARGET = """
binary-arch: build
%(dh_binary_arch_lines)s
"""
RULES_BINARY_INDEP_TARGET = """
binary-indep: build
%(dh_binary_indep_lines)s
%(dh_installmime_indep_line)s
%(dh_desktop_indep_line)s
"""
| mit |
JanNash/sms-tools | lectures/05-Sinusoidal-model/plots-code/sineModelAnal-bendir.py | 24 | 1245 | import numpy as np
import matplotlib.pyplot as plt
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
w = np.hamming(2001)        # analysis window
N = 2048                    # FFT size
H = 200                     # hop size in samples
t = -80                     # magnitude threshold in dB for peak detection
minSineDur = 0.02           # minimum duration (seconds) of a sinusoidal track
maxnSines = 150             # maximum number of simultaneous sinusoids
freqDevOffset = 10          # allowed frequency deviation (Hz) at 0 Hz
freqDevSlope = 0.001        # increase of the allowed deviation with frequency

# magnitude and phase spectrogram for the background display
mX, pX = STFT.stftAnal(x, fs, w, N, H)
# frequencies, magnitudes and phases of the tracked sinusoids
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
plt.figure(1, figsize=(9.5, 7))
maxplotfreq = 800.0                              # plot only frequencies below 800 Hz
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)       # frame times in seconds
binFreq = np.arange(maxplotbin+1)*float(fs)/N    # bin center frequencies in Hz
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)

# overlay the sinusoidal tracks; zeros become NaNs so matplotlib breaks the lines
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sinusoidal tracks (bendir.wav)')
plt.tight_layout()
plt.savefig('sineModelAnal-bendir.png')
plt.show() | agpl-3.0 |