repo_name | path | copies | size | content | license
---|---|---|---|---|---
aestrivex/ielu | ielu/plotting_utils.py | 1 | 6230 |
import os
import numpy as np
import nibabel as nib
from traits.api import HasTraits, Float, Int, Tuple
from traitsui.api import View, Item, CSVListEditor
from .geometry import get_vox2rasxfm, apply_affine, get_std_orientation
from .utils import get_subjects_dir
def force_render( figure=None ):
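"""Force an immediate redraw of a Mayavi figure by rendering the scene and
processing pending GUI events while toggling the GUI busy flag."""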
from mayavi import mlab
figure.scene.render()
mlab.draw(figure=figure)
from pyface.api import GUI
_gui = GUI()
orig_val = _gui.busy
_gui.set_busy(busy=True)
_gui.process_events()
_gui.set_busy(busy=orig_val)
_gui.process_events()
def coronal_slice(elecs, start=None, end=None, outfile=None,
subjects_dir=None,
subject=None, reorient2std=True, dpi=150, size=(200,200),
title=None):
'''
Create an image of a coronal slice approximating a depth lead inserted
laterally with no variation along the Y axis, and plot the lead's
electrodes overlaid on the slice in the X and Z directions.
Parameters
----------
elecs : List( Electrode )
list of electrode objects forming this depth lead
start : Electrode
Electrode object at one end of the depth lead
end : Electrode
Electrode object at the other end of the depth lead
outfile : Str
Filename to save the image to
subjects_dir : Str | None
The freesurfer subjects_dir. If this is None, it is assumed to be the
$SUBJECTS_DIR environment variable. If this folder is not writable,
the program will crash.
subject : Str | None
The freesurfer subject. If this is None, it is assumed to be the
$SUBJECT environment variable.
reorient2std : Bool
Apply a matrix to rotate orig.mgz to the standard MNI orientation
emulating fslreorient2std. Pretty much always true here.
dpi : Int
Dots per inch of output image
size : Tuple
Specify a 2-tuple to control the image size, default is (200,200)
title : Str
Specify a matplotlib title
'''
print('creating coronal slice with start electrode %s' % str(start))
subjdir_subj = get_subjects_dir( subjects_dir=subjects_dir,
subject=subject )
orig = os.path.join(subjdir_subj, 'mri', 'orig.mgz')
x_size, y_size, z_size = nib.load(orig).shape
# vox2ras and ras2vox shouldn't need different procedures for
# determining the spatial dimensions: inverting the matrix beforehand
# ensures the dimension ordering is already correct, even in the
# complex 3-way case
vox2ras = get_vox2rasxfm(orig, stem='vox2ras')
ras2vox = np.linalg.inv(vox2ras)
ras2vox[0:3,3] = (x_size/2, y_size/2, z_size/2)
rd, ad, sd = get_std_orientation(ras2vox)
# rd, = np.where(np.abs(ras2vox[:,0]) == np.max(np.abs(ras2vox[:,0])))
# ad, = np.where(np.abs(ras2vox[:,1]) == np.max(np.abs(ras2vox[:,1])))
# sd, = np.where(np.abs(ras2vox[:,2]) == np.max(np.abs(ras2vox[:,2])))
r_size = [x_size, y_size, z_size][rd]
a_size = [x_size, y_size, z_size][ad]
s_size = [x_size, y_size, z_size][sd]
#starty = pd.map_cursor( start.asras(), pd.current_affine, invert=True)[1]
#endy = pd.map_cursor( end.asras(), pd.current_affine, invert=True )[1]
#midy = (starty+endy)/2
#pd.move_cursor(128, midy, 128)
electrodes = np.squeeze([apply_affine([e.asras()], ras2vox)
for e in elecs])
#electrodes = np.array([pd.map_cursor(e.asras(), ras2vox,
# invert=True) for e in elecs])
vol = np.transpose( nib.load(orig).get_data(), (rd, ad, sd) )
if start is not None and end is not None:
start_coord = np.squeeze(apply_affine([start.asras()], ras2vox))
end_coord = np.squeeze(apply_affine([end.asras()], ras2vox))
if start_coord[rd] == end_coord[rd]:
raise ValueError("This lead has no variation in the X axis. It shouldn't be displayed coronally")
slice = np.zeros((s_size, r_size))
m = (start_coord[ad]-end_coord[ad])/(start_coord[rd]-end_coord[rd])
b = start_coord[ad]-m*start_coord[rd]
rnew = np.arange(r_size)
anew = m*rnew+b
alower = np.floor(anew).astype(int)  # integer voxel indices for interpolation
afrac = np.mod(anew, 1)
try:
for rvox in rnew:
slice[:, rvox] = (vol[rvox, alower[rvox], :] *
(1-afrac[rvox])+vol[rvox, alower[rvox]+1, :] *
afrac[rvox])
except IndexError:
raise ValueError("This lead has minimal variation in the X axis. It shouldn't be displayed coronally")
else:
slice_nr = int(np.round(np.mean(electrodes[:,ad])))  # voxel index must be an integer
slice = vol[:, slice_nr, :].T
vox2pix = np.zeros((2,4))
vox2pix[0, rd] = 1
vox2pix[1, sd] = 1
ras2pix = np.dot(vox2pix, ras2vox)
pix = np.dot(ras2pix,
np.transpose([np.append(e.asras(), 1) for e in elecs]))
#add data to coronal plane
import pylab as pl
fig = pl.figure()
pl.imshow(slice, cmap='gray')
pl.scatter(pix[0,:], pix[1,:], s=10, c='red', edgecolor='yellow',
linewidths=0.4)
if title is not None:
pl.title(title)
pl.axis('off')
#pl.show()
if outfile is not None:
pl.savefig(outfile, dpi=dpi)
return fig
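# Minimal usage sketch (hypothetical Electrode objects; assumes $SUBJECTS_DIR
# and $SUBJECT are set in the environment, or passed explicitly):
# fig = coronal_slice(elecs, start=elecs[0], end=elecs[-1],
#                     outfile='lead_coronal.png', title='Depth lead A')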
def sequence_3d_images( figure ):
from mayavi import mlab
views = [lambda:mlab.view( azimuth=0, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=180, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=0, elevation=0, figure=figure ),
lambda:mlab.view( azimuth=90, elevation=90, figure=figure ),
lambda:mlab.view( azimuth=270, elevation=90, figure=figure )]
for view in views:
yield view
def save_opaque_clinical_sequence( savefile, mayavi_figure ):
import pylab as pl
from matplotlib.backends.backend_pdf import PdfPages
from mayavi import mlab
with PdfPages(savefile) as pdf:
for angle in sequence_3d_images( mayavi_figure ):
angle()
force_render( figure=mayavi_figure )
pixmap = mlab.screenshot( figure=mayavi_figure )
mpl_figure = pl.figure()
pl.imshow(pixmap, figure=mpl_figure)
pdf.savefig(mpl_figure)
| gpl-3.0 |
albu5/deepGroup | group-detection/vis_kernel_affinity.py | 1 | 6897 |
"""
Visualize and save group detections
"""
from utils import read_cad_frames, read_cad_annotations, get_interaction_features, add_annotation, custom_interaction_features
from matplotlib import pyplot as plt
from keras.models import Model
from keras.layers import Input, Dense
from keras.layers.merge import add
from keras.optimizers import adam
import keras.backend as kb
from keras.models import load_model
import numpy as np
from scipy import io
from utils import get_group_instance
from matplotlib import pyplot as plt
from keras import losses
from sklearn.cluster import AffinityPropagation, DBSCAN
import os
from numpy import genfromtxt, savetxt
def kernel_loss(y_true, y_pred):
inclusion_dist = kb.max(y_pred - 1 + y_true)
exclusion_dist = kb.max(y_pred - y_true)
exclusion_dist2 = kb.mean(y_pred * (1 - y_true) * kb.cast(y_pred > 0, dtype=kb.floatx()))
# ex_cost = kb.log(exclusion_dist + kb.epsilon()) * (1 - kb.prod(y_true))
# in_cost = -kb.log(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
ex_cost = (exclusion_dist2 + kb.epsilon()) * (1 - kb.prod(y_true))
in_cost = -(inclusion_dist + kb.epsilon()) * (1 - kb.prod(1 - y_true))
# return inclusion_dist * kb.sum(y_true)
# return - exclusion_dist * (1 - kb.prod(y_true))
return in_cost + ex_cost
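# Toy sanity check (hypothetical values; first two entries share a group):
# y_true = kb.constant([[1., 1., 0., 0.]])
# y_pred = kb.constant([[0.9, 0.8, 0.2, 0.1]])
# print(kb.eval(kernel_loss(y_true, y_pred)))
# in_cost rewards high within-group affinity (more negative loss), while
# ex_cost penalizes positive affinity predicted outside the group.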
def simple_loss(y_true, y_pred):
res_diff = (y_true - y_pred) * kb.cast(y_pred >= 0, dtype=kb.floatx())
return kb.sum(kb.square(res_diff))
'''
======================CONSTANTS==================================================================================
'''
losses.simple_loss = simple_loss
losses.kernel_loss = kernel_loss
if not os.path.exists('res'):
os.makedirs('res')
model_path = './models/cad-kernel-affinity-bottom-max-long-custom-20.h5'
n_max = 20
cad_dir = '../ActivityDataset'
annotations_dir = cad_dir + '/' + 'csvanno-long-feat'
annotations_dir_out = cad_dir + '/' + 'csvanno-long-feat-results'
colorstr = ['r', 'g', 'b', 'k', 'w', 'm', 'c', 'y']
n = 11
# specify which sequences are visualized
test_seq = [1, 4, 5, 6, 8, 2, 7, 28, 35, 11, 10, 26]
kernel_net = load_model(model_path)
for n in range(1, 45):
try:
if n == 39:
continue
f = 1
pose_vec = genfromtxt('../common/pose/pose%2.2d.txt' % n)
pose_meta = genfromtxt('../common/pose/meta%2.2d.txt' % n)
action_vec = genfromtxt('../split1/atomic/actions.txt')
action_meta = genfromtxt('../split1/atomic/meta.txt')
if not os.path.exists('res/scene%d' % n):
os.makedirs('res/scene%d' % n)
# fig, ax = plt.subplots(1)
anno_data = read_cad_annotations(annotations_dir, n)
print(anno_data.shape)
n_frames = np.max(anno_data[:, 0])
while True:
f += 10
if f > n_frames:
break
im = read_cad_frames(cad_dir, n, f)
bx, by, bp, bi = custom_interaction_features(anno_data, f, max_people=20)
# print(bx[0].shape, by[0].shape, bp[0].shape)
# print(len(bx))
# print(bx[0][:, 18:22])
anno_data_i = anno_data[anno_data[:, 0] == f, :]
n_ped = anno_data_i.shape[0]
affinity_matrix = []
for j in range(len(bx)):
# uncomment this to visualize
# plt.clf()
# ax.clear()
# ax.imshow(im)
temp = np.squeeze(kernel_net.predict_on_batch(x=[bx[j], bp[j]]))
affinity_matrix.append(temp[0:n_ped].tolist())
# uncomment this to visualize individual features
# print()
# print(np.round(temp[0:n_ped], 2))
# print(by[j][0:n_ped, 0])
# print()
# add_annotation(ax, bi[j, 2:6], 'k', 2)
for k in range(n_ped):
l = k
# uncomment this to visualize individual features
# if l is not j:
# if np.sum(bi[k, 10:]) > 0:
# if temp[l] > 0.5:
# add_annotation(ax, bi[k, 2:6], 'b', 2)
# ax.arrow(bi[k, 2], bi[k, 3], 64 * bx[k][k, 0], 64 * bx[k][k, 1], fc='b', ec='b',
# head_width=5, head_length=10)
# else:
# add_annotation(ax, bi[k, 2:6], 'r', 2)
# ax.arrow(bi[k, 2], bi[k, 3], 64 * bx[k][k, 0], 64 * bx[k][k, 1], fc='r', ec='r',
# head_width=5, head_length=10)
# uncomment this to visualize individual features
# add_annotation(ax, bi[j, 2:6], 'k', 2)
# ax.arrow(bi[j, 2], bi[j, 3], 64*bx[j][0, 0], 64*bx[j][0, 1], fc='k', ec='k',
# head_width=5, head_length=10)
# print(bi[j, 2], bi[j, 3], 64*bx[j][0, 0], 64*bx[j][0, 1])
# plt.pause(1./2)
affinity_matrix = np.array(affinity_matrix)
affinity_matrix[np.isnan(affinity_matrix)] = 0
# try:
# print(affinity_matrix)
if n_ped == 0:
continue
af = DBSCAN(eps=0.55, metric='precomputed', min_samples=0, algorithm='auto', n_jobs=1)
af.fit(1-affinity_matrix)
# print(af.labels_)
af_labels = af.labels_
n_samples = af_labels.shape[0]
ipm = np.zeros(shape=(n_samples, n_samples))
for i1 in range(n_samples):
for i2 in range(n_samples):
ipm[i1, i2] = af_labels[i1] == af_labels[i2]
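# equivalent vectorized form: ipm = (af_labels[:, None] == af_labels[None, :]).astype(float)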
# print(ipm)
gt_pm = np.zeros(shape=(n_samples, n_samples))
for i1 in range(n_samples):
for i2 in range(n_samples):
gt_pm[i1, i2] = by[i1][i2, 0]
# print(gt_pm)
# ax.clear()
# ax.imshow(im)
# for j in range(len(bx)):
# # plt.clf()
# add_annotation(ax, bi[j, 2:6], colorstr[af_labels[j]], 2)
# plt.pause(0.01)
# plt.savefig('res/scene%d/frame%d.png' % (n, f))
## except:
# print('skipped clustering')
for ped_i in range(af_labels.shape[0]):
# print(np.sum(np.bitwise_and(anno_data[:, 0] == f, anno_data[:, 1] == ped_i+1)))
anno_data[np.bitwise_and(anno_data[:, 0] == f, anno_data[:, 1] == ped_i+1), 8] = af_labels[ped_i] + 1
# save group labels
savetxt(annotations_dir_out + '/' + 'data_%2.2d.txt' % n, anno_data, delimiter=',')
print(annotations_dir_out + '/' + 'data_%2.2d.txt' % n)
except Exception:
print('skipped', n)
| mit |
MohammedWasim/scikit-learn | sklearn/linear_model/tests/test_base.py | 101 | 12205 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(X, y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
| bsd-3-clause |
ibell/coolprop | wrappers/Python/CoolProp/Plots/PsychChart.py | 1 | 5652 | """
This file implements a psychrometric chart for air at 1 atm
"""
import CoolProp
HAProps = CoolProp.HumidAirProp.HAProps
InlineLabel = CoolProp.Plots.Plots.InlineLabel
import matplotlib, numpy, textwrap
import_template=(
"""
import numpy, matplotlib
from CoolProp.HumidAirProp import HAProps
from CoolProp.Plots.Plots import InlineLabel
p = 101.325
Tdb = numpy.linspace(-10,60,100)+273.15
#Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
"""
)
closure_template=(
"""
matplotlib.pyplot.show()
"""
)
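# Each plotting class below implements both plot(ax), which draws directly,
# and __str__, which emits equivalent standalone code; the import/closure
# templates above wrap those snippets into the generated PsychScript.py.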
Tdb = numpy.linspace(-10,60,100)+273.15
p = 101.325
class PlotFormatting(object):
def plot(self,ax):
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
def __str__(self):
return textwrap.dedent("""
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
""")
class SaturationLine(object):
def plot(self,ax):
w = [HAProps('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
def __str__(self):
return textwrap.dedent("""
# Saturation line
w = [HAProps('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
"""
)
class HumidityLabels(object):
def __init__(self,RH_values,h):
self.RH_values = RH_values
self.h = h
def plot(self,ax):
xv = Tdb #[K]
for RH in self.RH_values:
yv = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAProps('W','P',p,'H',self.h,'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+str(RH*100)+'%'
#Make a temporary label to get its bounding box
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
def __str__(self):
return textwrap.dedent("""
xv = Tdb #[K]
for RH in {RHValues:s}:
yv = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAProps('W','P',p,'H',{h:f},'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+str(RH*100)+'%'
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
""".format(h=self.h, RHValues=str(self.RH_values))
)
class HumidityLines(object):
def __init__(self,RH_values):
self.RH_values = RH_values
def plot(self,ax):
for RH in self.RH_values:
w = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
def __str__(self):
return textwrap.dedent("""
# Humidity lines
RHValues = {RHValues:s}
for RH in RHValues:
w = [HAProps('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
""".format(RHValues=str(self.RH_values))
)
class EnthalpyLines(object):
def __init__(self,H_values):
self.H_values = H_values
def plot(self,ax):
for H in self.H_values:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAProps('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAProps('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAProps('W','H',H,'P',p,'R',1.0)
w0 = HAProps('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
def __str__(self):
return textwrap.dedent("""
# Enthalpy lines
for H in {HValues:s}:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAProps('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAProps('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAProps('W','H',H,'P',p,'R',1.0)
w0 = HAProps('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
""".format(HValues=str(self.H_values))
)
if __name__=='__main__':
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")
SL = SaturationLine()
SL.plot(ax)
RHL = HumidityLines([0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9])
RHL.plot(ax)
RHLabels = HumidityLabels([0.05,0.1,0.15,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9], h=65)
RHLabels.plot(ax)
HL = EnthalpyLines(range(-20,100,10))
HL.plot(ax)
PF = PlotFormatting()
PF.plot(ax)
matplotlib.pyplot.show()
fp = open('PsychScript.py','w')
for chunk in [import_template,SL,RHL,HL,PF,RHLabels,closure_template]:
fp.write(str(chunk))
fp.close()
exec(open('PsychScript.py').read())  # execfile() exists only in Python 2
| mit |
ulno/micropython-extra-ulno | examples/plot_log/liveplot2d.py | 2 | 4391 | #!/usr/bin/env python3
# tail a file into a plot
#
# take parameter of filename
#
# Resources:
# - http://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib
# - from: https://lethain.com/tailing-in-python/
#
# Author: ulno
# Create date: 2017-04-30
#
import time
from optparse import OptionParser
import numpy as np
import matplotlib.pyplot as plt
import numbers
SLEEP_INTERVAL = 0.02
minx = None
maxx = None
miny = None
maxy = None
interval_start = None
average_sum = 0
last_average = None
average_count = 0
point_counter = 0
def add_point(x, y, c="blue"):
global minx, maxx, miny, maxy, interval_start, average_sum, average_count
if minx is None:
minx = x
maxx = x + 1
# TODO: better init value for max
miny = y
maxy = y + 1
interval_start = x
else:
minx = min(minx, x)
maxx = max(maxx, x)
miny = min(miny, y)
maxy = max(maxy, y)
plt.axis([minx, maxx, miny, maxy])
plt.scatter(x, y, c=c)
def draw(x, y, interval, diff=False, c="blue"):
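"""Plot the point (x, y). If `interval` is set, points are accumulated and
one averaged point is drawn per x-interval (with diff=True, the difference
between successive interval averages is drawn instead)."""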
global point_counter, last_average, average_sum, average_count, interval_start
if not isinstance(x, numbers.Number) or not isinstance(y, numbers.Number):
return # don't draw if one is not a number
if interval is not None:
if interval_start is None and x is not None:
interval_start = x
if x > interval_start + interval:
current_average = last_average
if average_count > 0:
current_average = average_sum / average_count
point_counter += 1
# print("x", x,"avg", average_sum, "count", average_count, current_average)
if current_average is not None:
if diff and last_average is not None:
add_point(interval_start + interval / 2.0,
current_average - last_average, c=c)
else:
add_point(interval_start + interval / 2.0, current_average, c=c)
last_average = current_average
average_sum = 0
average_count = 0
interval_start += interval
average_sum += y
average_count += 1
else:
add_point(x, y, c=c)
point_counter += 1
def parse_line_as_tuple(l):
s = l.strip().split()
if len(s) >= 2:
retval = []
for i in s:
try:
conv = float(i)
except:
conv = None
retval.append(conv)
return retval
else:
return None
def init(fin, column, interval, diff=False, c="blue"):
global point_counter
counter = 0
for l in fin:
t = parse_line_as_tuple(l)
if t is not None and len(t) > column:
draw(t[0], t[column], interval, diff=diff, c=c)
counter += 1
if counter % 1000 == 0:
print("Read", counter, "lines,", point_counter, "valid points.")
plt.ion()
plt.show()
def tail(fin):
"Listen for new lines added to file."
while True:
where = fin.tell()
line = fin.readline()
if not line:
plt.pause(SLEEP_INTERVAL)
fin.seek(where)
else:
yield line
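# Example usage (hypothetical log file): print each new line as it arrives:
# for line in tail(open('sensor.log')):
#     print(line, end='')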
def main():
p = OptionParser("usage: liveplot.py file [color [[column] [interval for averaging [differential:diff ]]]]")
(options, args) = p.parse_args()
if len(args) < 1:
p.error("must at least specify a file to watch")
with open(args[0], 'r') as fin:
column = 1
color = "blue"
interval = None
if len(args) > 1:
color = args[1]
if len(args) > 2:
column = int(args[2])
if len(args) > 3:
interval = float(args[3])
if len(args) > 4:
diff = args[4].lower().startswith("diff")
else:
diff = False
init(fin, column, interval, diff=diff, c=color)
print("Read", point_counter, "valid points.")
print("Reached file end. If valid points are 0,\nno graphics is shown until there are valid points.")
for line in tail(fin):
p = parse_line_as_tuple(line)
if p is not None:
draw(p[0], p[column], interval, diff=diff, c=color)
plt.pause(SLEEP_INTERVAL)
if __name__ == '__main__':
main()
| mit |
henriquemiranda/yambo-py | tutorial/mos2/proj_mos2.py | 2 | 2428 | from __future__ import print_function, division
#
# Author: Henrique Pereira Coutada Miranda
# Example script to plot the weight of the atomic species in the bandstructure
#
from qepy import *
import sys
import argparse
import matplotlib.pyplot as plt
folder = 'bands'
npoints = 20
p = Path([ [[0.0, 0.0, 0.0],'G'],
[[0.5, 0.0, 0.0],'M'],
[[1./3,1./3, 0.0],'K'],
[[0.0, 0.0, 0.0],'G']], [int(npoints*2),int(npoints),int(sqrt(5)*npoints)])
#parse options
parser = argparse.ArgumentParser(description='Test the yambopy script.')
parser.add_argument('-c' ,'--calc', action="store_true", help='Project orbitals')
parser.add_argument('-a' ,'--analyse', action="store_true", help='Analyse data')
parser.add_argument('-p1' ,'--plot_size', action="store_true", help='Analyse data')
parser.add_argument('-p2' ,'--plot_orbital', action="store_true", help='Analyse data')
args = parser.parse_args()
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
if args.calc:
f = open('proj.in','w')
projwfc = ProjwfcIn('mos2')
projwfc.write(folder=folder)
projwfc.run(folder=folder)
if args.analyse:
pxml = ProjwfcXML('mos2',path=folder)
# obtain the list of orbitals and quantum numbers
print(pxml)
print("Writting projections")
pxml.write_proj()
print("done!")
if args.plot_size:
pxml = ProjwfcXML('mos2',path=folder)
print(pxml)
# select orbitals to plot
# example 1: Mo, S, and MoS2
mo = list(range(16)) # indices of all the Mo orbitals
s = list(range(16,48)) # indices of all the S orbitals
fig = plt.figure(figsize=(30,10))
for n,(orb,title) in enumerate(zip([mo,s,mo+s],['mo','s','mos2'])):
ax = plt.subplot(1,3,n+1)
plt.title(title)
pxml.plot_eigen(ax,path=p,selected_orbitals=orb,size=40)
ax.set_ylim([-7,6])
plt.show()
if args.plot_orbital:
pxml = ProjwfcXML('mos2',path=folder)
print(pxml)
# select orbitals to plot
# example 2: Mo vs. S
mo = list(range(16)) # indices of all the Mo orbitals
s = list(range(16,48)) # indices of all the S orbitals
fig = plt.figure(figsize=(8,10))
ax = plt.subplot(1,1,1)
pxml.plot_eigen(ax,path=p,selected_orbitals=mo,selected_orbitals_2=s,size=40,cmap='RdBu')
ax.set_ylim([-7,6])
plt.show()
| bsd-3-clause |
Ichaelus/Github-Classifier | Application/Models/ClassificationModules/metaonlyrandomforest.py | 1 | 2706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from Models.FeatureProcessing import *
import sklearn
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class metaonlyrandomforest(ClassificationModule):
"""A basic Random Forest Classifier"""
def __init__(self, n_estimators=250):
ClassificationModule.__init__(self, "Meta Only Random Forest", "Ensemble Learner with 250 Decision-Trees as base-classifier. Uses only our metadata.")
self.clf = RandomForestClassifier(n_estimators=n_estimators, class_weight = 'balanced')
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
self.clf = sklearn.base.clone(self.clf)
def trainOnSample(self, sample, nb_epoch=10, shuffle=True, verbose=True):
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
return self.clf.fit(readme_vec, np.expand_dims(label_index, axis=0))
def train(self, samples, nb_epoch=10, shuffle=True, verbose=True):
"""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)"""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(getLabelIndex(sample))
train_lables = np.asarray(train_lables)
train_result = self.clf.fit(train_samples, train_lables)
self.isTrained = True
return train_result
def predictLabel(self, sample):
"""Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde"""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return self.clf.predict(sample)[0]
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.clf.predict_proba(sample)[0]
return [np.argmax(prediction)] + list(prediction)
def formatInputData(self, sample):
"""Extract description and transform to vector"""
sd = getMetadataVector(sample)
# Returns numpy array which contains 1 array with features
return np.expand_dims(sd, axis=0)
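# Minimal usage sketch (hypothetical: `samples` is a list of repository
# samples in the format the FeatureProcessing helpers expect):
# clf = metaonlyrandomforest()
# clf.train(samples)
# result = clf.predictLabelAndProbability(samples[0])  # [label, p_0, p_1, ...]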
| mit |
NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse numérique/Équations différentielles numériques/Méthode des éléments finis/hpfem2d.py | 1 | 6104 | # -*- coding: utf-8 -*-
"""
Program for generating 2D hp finite element trial functions and their
derivatives
Copyright (C) 2013 Greg von Winckel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created: Tue Sep 24 08:59:11 MDT 2013
Last updated: Sat Oct 5 10:15:09 MDT 2013
"""
import os
import numpy as np
import orthopoly as op
from nodes import nodes, vertex_indices, edge_indices, interior_indices
from pkdo import pkdo
from triquad import triquad
from numpy.linalg import inv
class hpfem2d(object):
def __init__(self, p):
"""
Form a basis generating object based on the (p+1)(p+2)/2
interpolation nodes
"""
self.p = p
# Construct the interpolation nodes
self.x, self.y = nodes(self.p)
# Form the PKDO Vandermonde on the nodes
V, _, _ = pkdo(self.p, self.x, self.y)
# Compute the inverse of the interpolation Vandermonde
self.Vi = inv(V)
def getInteriorTrial(self, q):
"""
Evaluate the nodal interpolating functions and their x and y
derivatives on a quadrature grid of q^2 points
"""
# Generate interior quadrature grid
xq, yq, wq = triquad(q)
# Compute Vandermondes PKDO polynomials and their derivatives on
# quadrature grid
V, Vx, Vy = pkdo(self.p, xq, yq)
# Trial functions
L = np.dot(V, self.Vi)
# x derivative of trial functions
Lx = np.dot(Vx, self.Vi)
# y derivative of trial functions
Ly = np.dot(Vy, self.Vi)
return xq, yq, wq, L, Lx, Ly
def getBoundaryTrial(self, q, edge):
"""
Evaluate the nodal interpolating functions along one of the edges
using q Legendre Gauss nodes
"""
# Gauss quadrature recursion coefficients
a, b = op.rec_jacobi(q, 0, 0)
# Legendre Gauss nodes and weights
t, wt = op.gauss(a, b)
# Affine map of [-1,1] to the appropriate triangle edge
xdict = {0: t, 1: -t, 2: -np.ones(q)}
ydict = {0: -np.ones(q), 1: t, 2: -t}
# Evaluate PKDO Vandermonde on the quadrature grid
V, _, _ = pkdo(self.p, xdict[edge], ydict[edge])
# Evaluate 2D Lagrange interpolants edge
L = np.dot(V, self.Vi)
return xdict[edge], ydict[edge], wt, L
def manufactured_solution(expression):
""" Evaluate a string for the exact symbolic solution and
create numerical function handles for all of the terms
needed to reconstruct it by solving the BVP """
from sympy import *
# Define symbolic variables for manufactured solution
x, y = symbols('x,y')
# Exact symbolic solution
u = eval(expression)
# Partial derivatives
ux = diff(u, x)
uy = diff(u, y)
# symbolic forcing function
f = -diff(ux, x) - diff(uy, y)
# Return list of numerical function handles
return [lambdify([x, y], fun, "numpy") for fun in [u, ux, uy, f]]
if __name__ == '__main__':
"""
Solve the Poisson equation with unit forcing on the
lower right triangle with Dirichlet (0), Neumann (1), and Robin (2)
conditions
"""
from scipy.linalg import solve
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Max polynomial order
p = 20
# Indices of interior and boundary points
idex = interior_indices(p)
edex = edge_indices(p)
# Instantiate FEM basis generator for this order
FEM = hpfem2d(p)
# Get function handles for the manufactured solution
u, ux, uy, f = manufactured_solution("cos(pi*(x-y)) + sin(pi*(x+y))")
# Get interior points and basis functions
xq, yq, wq, L, Lx, Ly = FEM.getInteriorTrial(p)
# Get boundary quadrature and basis functions
x1, y1, w1, L1 = FEM.getBoundaryTrial(p, 1)
x2, y2, w2, L2 = FEM.getBoundaryTrial(p, 2)
# Inner product over the elemental interior
def iprod(A, B):
return np.dot(wq * A.T, B)
# Interpolation points
x, y = FEM.x, FEM.y
# Total number of nodes
N = len(x)
# Evaluate the exact solution on edge 0 - including the vertex nodes
# because this side has a Dirichlet condition
e0 = [0, 1] + edex[0]
a = u(x[e0], y[e0])
# Evaluate the normal derivative on edge 1
b = ux(x1, y1) + uy(x1, y1)
# Evaluate the solution plus normal derivative on edge 2
c = u(x2, y2) - ux(x2, y2)
# Compute load vector
fhat = iprod(L, f(xq, yq))
# Integrate inhomogeneous boundary terms against test functions
bhat = np.dot(w1 * L1.T, b)
chat = np.dot(w2 * L2.T, c)
# Surface matrix for Robin condition on edge 2
S = np.dot(w2 * L2.T, L2)
# Stiffness matrix
K = iprod(Lx, Lx) + iprod(Ly, Ly)
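# (entries K_ij approximate the weak-form integral of grad(L_i) . grad(L_j)
# over the element, via the quadrature rule applied in iprod)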
# Left-hand-side
LHS = K + S
# Computed solution
psi = np.zeros(N)
# Set Dirichlet data
psi[e0] = a
# Right-hand-side
rhs = fhat + bhat + chat - np.dot(LHS, psi)
# Solve for interior points, and points on edges 1 and 2, and vertex 2
dex = idex + edex[1] + edex[2] + [2]
psi[dex] = solve(LHS[dex, :][:, dex], rhs[dex])
fig = plt.figure()
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122, projection='3d')
ax1.plot_trisurf(x, y, psi, cmap=plt.cm.CMRmap)
ax1.set_title('Computed Solution')
ax2.plot_trisurf(x, y, u(x, y), cmap=plt.cm.CMRmap)
ax2.set_title('Exact Solution')
plt.show()
os.system("pause")
| gpl-3.0 |
voxlol/scikit-learn | sklearn/utils/__init__.py | 132 | 14185 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
in an empty pair of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
tgy/facedetect | script/plot_mblbp.py | 1 | 2423 | #!/usr/bin/env python3
'''Visualize randomly chosen mblbp features in a given window'''
import random
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.animation as animation
import matplotlib.image as mpimg
from PIL import Image
COLORS = ['#1a535c', '#4ecdc4', '#ff6b6b', '#ffe66d', '#ffe66d',
'#ff6b6b', '#4ecdc4', '#1a535c']
def plot_animated_mblbp(window_w, window_h):
dpi = 96
img = Image.open('gfx/tgy.jpg').convert('LA')
fig = plt.figure(figsize=(dpi / 40, dpi / 40), dpi=dpi, frameon=False)
ax = plt.axes(xlim=(0, 20), ylim=(0, 20))
ax.imshow(img, interpolation='nearest', cmap=plt.get_cmap('gray'),
extent=[0, 20, 20, 0], alpha=0.7)
ax.set_ylim(ax.get_ylim()[::-1]) # invert y-axis
ax.xaxis.tick_top() # move x-axis to the top
ax.xaxis.set_ticks(range(1, 21))
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticks(range(1, 21))
ax.yaxis.set_ticklabels([])
for tic in ax.xaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
for tic in ax.yaxis.get_major_ticks():
tic.tick1On = tic.tick2On = False
tic.label1On = tic.label2On = False
ax.grid(True, which='both', linestyle='-')
for spine in ax.spines:
ax.spines[spine].set_visible(False)
features = []
for block_w in range(3, 10, 3):
for block_h in range(3, 10, 3):
for offset_x in range(window_w - block_w + 1):
for offset_y in range(window_h - block_h + 1):
feature = {
'block_w': block_w,
'block_h': block_h,
'offset_x': offset_x,
'offset_y': offset_y,
}
features.append(feature)
random.shuffle(features)
sample = random.sample(features, 6)
for i, feature in enumerate(sample):
rectangle = patches.Rectangle(
(feature['offset_x'], feature['offset_y']),
feature['block_w'], feature['block_h'],
facecolor=COLORS[i], linewidth=1, alpha=0.4,
)
ax.add_patch(rectangle)
plt.savefig('gfx/mblbp.svg')  # savefig does not take an interpolation kwarg
plt.show()
def main():
window_w = 20
window_h = 20
plot_animated_mblbp(window_w, window_h)
if __name__ == '__main__':
main()
| mit |
cmap/cmapPy | cmapPy/math/tests/test_fast_cov.py | 1 | 17355 | import unittest
import logging
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
import cmapPy.math.fast_cov as fast_cov
import numpy
import tempfile
import os
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class TestFastCov(unittest.TestCase):
@staticmethod
def build_standard_x_y():
x = numpy.array([[1,2,3], [5,7,11]], dtype=float)
logger.debug("x: {}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[13, 17, 19], [23, 29, 31]], dtype=float)
logger.debug("y: {}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
@staticmethod
def build_nan_containing_x_y():
x = numpy.array([[1,numpy.nan,2], [3,5,7], [11,13,17]], dtype=float)
logger.debug("x:\n{}".format(x))
logger.debug("x.shape: {}".format(x.shape))
y = numpy.array([[19, 23, 29], [31, 37, 41], [numpy.nan, 43, 47]], dtype=float)
logger.debug("y:\n{}".format(y))
logger.debug("y.shape: {}".format(y.shape))
return x, y
def test_validate_inputs(self):
shape = (3,2)
#happy path just x
x = numpy.zeros(shape)
fast_cov.validate_inputs(x, None, None)
x = numpy.zeros(1)
fast_cov.validate_inputs(x, None, None)
#unhappy path just x, x does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, None, None)
logger.debug("unhappy path just x, x does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
#unhappy path x does not have shape attribute, y does not have shape attribute
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(None, 3, None)
logger.debug("unhappy path x does not have shape attribute, y does not have shape attribute - context.exception: {}".format(context.exception))
self.assertIn("x needs to be numpy array-like", str(context.exception))
self.assertIn("y needs to be numpy array-like", str(context.exception))
#happy path x and y
x = numpy.zeros(shape)
y = numpy.zeros(shape)
fast_cov.validate_inputs(x, y, None)
#happy path y different shape from x
y = numpy.zeros((3,1))
fast_cov.validate_inputs(x, y, None)
#unhappy path y different shape from x, invalid axis
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y.T, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x.T, y, None)
logger.debug("unhappy path y different shape from x, invalid axis - context.exception: {}".format(context.exception))
self.assertIn("the number of rows in the x and y matrices must be the same", str(context.exception))
#happy path with x, destination
x = numpy.zeros(shape)
dest = numpy.zeros((shape[1], shape[1]))
fast_cov.validate_inputs(x, None, dest)
#unhappy path with x, destination wrong size
dest = numpy.zeros((shape[1]+1, shape[1]))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, None, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x - context.exception: {}".format(context.exception))
self.assertIn("x and destination provided", str(context.exception))
self.assertIn("destination must have shape matching", str(context.exception))
#happy path with x, y, destination
x = numpy.zeros(shape)
y = numpy.zeros((shape[0], shape[1]+1))
dest = numpy.zeros((shape[1], shape[1]+1))
fast_cov.validate_inputs(x, y, dest)
#unhappy path x, y, destination wrong size
dest = numpy.zeros((shape[1], shape[1]+2))
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.validate_inputs(x, y, dest)
logger.debug("unhappy path incorrrect shape of destination for provided x, y - context.exception: {}".format(context.exception))
self.assertIn("x, y, and destination provided", str(context.exception))
self.assertIn("destination must have number of", str(context.exception))
def test_fast_cov_check_validations_run(self):
#unhappy path check that input validation checks are run
with self.assertRaises(fast_cov.CmapPyMathFastCovInvalidInputXY) as context:
fast_cov.fast_cov(None, None)
logger.debug("unhappy path check that input validation checks are run - context.exception: {}".format(context.exception))
def test_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_standard_x_y()
ex = numpy.cov(x, rowvar=False)
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path just x, uses destination
dest = numpy.zeros((x.shape[1], x.shape[1]))
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path just x, uses destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination which is a different type
dest = dest.astype(numpy.float16)
r = fast_cov.fast_cov(x, destination=dest)
logger.debug("happy path, just x, uses destination which is a different type - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path just x, uses destination that is a numpy.memmap
outfile = tempfile.mkstemp()
logger.debug("happy path, just x, uses destination which is a numpy.memmap - outfile: {}".format(outfile))
dest = numpy.memmap(outfile[1], dtype="float16", mode="w+", shape=ex.shape)
dest_array = numpy.asarray(dest)
r = fast_cov.fast_cov(x, destination=dest_array)
dest.flush()
logger.debug(" - r: {}".format(r))
os.close(outfile[0])
os.remove(outfile[1])
#happy path just x, transposed
ex = numpy.cov(x, rowvar=True)
logger.debug("happy path just x, transposed, expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_standard_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x, y, and destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x, y, and destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
#happy path x and y, other direction
combined = numpy.hstack([x.T, y.T])
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("happy path x and y, other direction, raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex: {}".format(ex))
r = fast_cov.fast_cov(x.T, y.T)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
def test_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, _ = TestFastCov.build_standard_x_y()
y = numpy.array([[13, 17, 19, 23, 41], [23, 29, 31, 37, 43]])
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined: {}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex: {}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex: {}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertTrue(numpy.allclose(ex, r))
#happy path x and y different shapes, using destination
dest = numpy.zeros((x.shape[1], y.shape[1]))
r = fast_cov.fast_cov(x, y, dest)
logger.debug("happy path x and y different shapes, using destination - r: {}".format(r))
self.assertIs(dest, r)
self.assertTrue(numpy.allclose(ex, dest))
def test_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
r = fast_cov.fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
def test_calculate_non_mask_overlaps(self):
x = numpy.zeros((3,3))
x[0,1] = numpy.nan
x = numpy.ma.array(x, mask=numpy.isnan(x))
logger.debug("happy path x has 1 nan - x:\n{}".format(x))
r = fast_cov.calculate_non_mask_overlaps(x.mask, x.mask)
logger.debug("r:\n{}".format(r))
expected = numpy.array([[3,2,3], [2,2,2], [3,2,3]], dtype=int)
self.assertTrue(numpy.array_equal(expected, r))
def test_nan_fast_cov_just_x(self):
logger.debug("*************happy path just x")
x, _ = TestFastCov.build_nan_containing_x_y()
ex_with_nan = numpy.cov(x, rowvar=False)
logger.debug("expected with nan's - ex_with_nan:\n{}".format(ex_with_nan))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex_with_nan)
self.assertTrue(numpy.allclose(ex_with_nan[non_nan_locs], r[non_nan_locs]))
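        # x[0, 1] is nan, so numpy.cov reports nan for every entry involving
        # column 1; recompute those entries pairwise over rows 1: to get what
        # nan_fast_cov should return instead.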
check_nominal_nans = []
u = x[1:, 1]
for i in range(3):
t = x[1:, i]
c = numpy.cov(t, u, bias=False)[0,1]
check_nominal_nans.append(c)
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
self.assertTrue(numpy.allclose(check_nominal_nans, r[:, 1]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, :]))
def test_nan_fast_cov_x_and_y(self):
logger.debug("*************happy path x and y")
x, y = TestFastCov.build_nan_containing_x_y()
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
off_diag_ind = int(combined.shape[1] / 2)
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
ex = raw_ex[:off_diag_ind, off_diag_ind:]
logger.debug("expected ex:\n{}".format(ex))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
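        # Entries touching x's nan (row 0 of column 1) or y's nan (row 2 of
        # column 0) come out nan in numpy.cov; recompute them pairwise over
        # the rows where both columns are populated.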
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_x_and_y_different_shapes(self):
logger.debug("*************happy path x and y different shapes")
x, t = TestFastCov.build_nan_containing_x_y()
y = numpy.zeros((t.shape[0], t.shape[1]+1))
y[:, :t.shape[1]] = t
y[:, t.shape[1]] = [53, 59, 61]
logger.debug("y.shape: {}".format(y.shape))
logger.debug("y:\n{}".format(y))
combined = numpy.hstack([x, y])
logger.debug("combined:\n{}".format(combined))
logger.debug("combined.shape: {}".format(combined.shape))
raw_ex = numpy.cov(combined, rowvar=False)
logger.debug("raw expected produced from numpy.cov on full combined - raw_ex:\n{}".format(raw_ex))
logger.debug("raw_ex.shape: {}".format(raw_ex.shape))
ex = raw_ex[:x.shape[1], -y.shape[1]:]
logger.debug("expected ex:\n{}".format(ex))
logger.debug("ex.shape: {}".format(ex.shape))
r = fast_cov.nan_fast_cov(x, y)
logger.debug("r:\n{}".format(r))
non_nan_locs = ~numpy.isnan(ex)
logger.debug("ex[non_nan_locs]: {}".format(ex[non_nan_locs]))
logger.debug("r[non_nan_locs]: {}".format(r[non_nan_locs]))
self.assertTrue(numpy.allclose(ex[non_nan_locs], r[non_nan_locs]))
check_nominal_nans = []
t = x[1:, 1]
for i in [1,2,3]:
u = y[1:, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[1, 1:]: {}".format(r[1, 1:]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[1, 1:]))
check_nominal_nans = []
u = y[:2, 0]
for i in [0, 2]:
t = x[:2, i]
c = numpy.cov(t,u)
check_nominal_nans.append(c[0,1])
logger.debug("calculate entries that would be nan - check_nominal_nans: {}".format(check_nominal_nans))
logger.debug("r values to compare to - r[[0,2], 0]: {}".format(r[[0,2], 0]))
self.assertTrue(numpy.allclose(check_nominal_nans, r[[0,2], 0]))
self.assertTrue(numpy.isnan(r[1,0]), """expect this entry to be nan b/c for the intersection of x[:,1] and y[:,0]
there is only one entry in common, therefore covariance is undefined""")
def test_nan_fast_cov_all_nan(self):
x = numpy.zeros(3)
x[:] = numpy.nan
x = x[:, numpy.newaxis]
logger.debug("x:\n{}".format(x))
r = fast_cov.nan_fast_cov(x)
logger.debug("r:\n{}".format(r))
self.assertEqual(0, numpy.sum(numpy.isnan(r)))
def test_nan_fast_cov_1D_arrays(self):
logger.debug("*****************happy path test_nan_fast_cov_1D_arrays")
x = numpy.array(range(3))
logger.debug("x.shape: {}".format(x.shape))
r = fast_cov.nan_fast_cov(x)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
y = numpy.array(range(3,6))
logger.debug("y.shape: {}".format(y.shape))
        r = fast_cov.nan_fast_cov(x, y)
logger.debug("r: {}".format(r))
self.assertEqual(1., r[0][0])
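
# A minimal usage sketch (illustrative only, not part of the test suite):
# fast_cov.fast_cov mirrors numpy.cov(..., rowvar=False) on clean data, and
# nan_fast_cov computes each entry over the pairwise-complete rows instead of
# propagating nan. The demo function name below is ours, not cmapPy's.
def _demo_fast_cov():
    x = numpy.array([[1., 2.], [3., 5.], [4., 4.]])
    # identical results on nan-free input
    assert numpy.allclose(fast_cov.fast_cov(x), numpy.cov(x, rowvar=False))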
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main()
| bsd-3-clause |
drpngx/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_test.py | 30 | 60826 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
model_ops = dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN,
params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
wire_var = dnn_linear_combined._get_embedding_variable(
embedding_wire, 'dnn', 'dnn/input_from_feature_columns')
for _ in range(2):
_, language_value, wire_value = sess.run(
[model_ops.train_op, language_var, wire_var])
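      # With its lr multiplier pinned to 0, the language embedding should stay
      # at the 0.1 constant initializer, while the wire embedding (no
      # multiplier set) should have moved after two training steps.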
initial_value = np.full_like(language_value, 0.1)
self.assertTrue(np.all(np.isclose(language_value, initial_value)))
self.assertFalse(np.all(np.isclose(wire_value, initial_value)))
class ActivationFunctionTest(test.TestCase):
def _getModelForActivation(self, activation_fn):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'feature_columns': [embedding_language],
'head': head_lib.multi_class_head(2),
'hidden_units': [1],
'activation_fn': activation_fn,
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
return dnn._dnn_model_fn(features, labels, model_fn.ModeKeys.TRAIN, params)
def testValidActivation(self):
_ = self._getModelForActivation('relu')
def testRaisesOnBadActivationName(self):
with self.assertRaisesRegexp(ValueError,
'Activation name should be one of'):
self._getModelForActivation('max_pool')
class DNNEstimatorTest(test.TestCase):
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNEstimator)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
dnn_estimator = dnn.DNNEstimator(
head=head_lib.multi_class_head(2, weight_column_name='w'),
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
dnn_estimator.fit(input_fn=_input_fn_train, steps=5)
scores = dnn_estimator.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
class DNNClassifierTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNClassifier(
n_classes=3,
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNClassifier)
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn.DNNClassifier(
feature_columns=[embedding_language],
hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertEqual(1, classifier.params['input_layer_min_slice_size'])
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertClassificationPredictions(
self, expected_len, n_classes, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, range(n_classes))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
fc_core.embedding_column(language_column, dimension=1),
fc_core.numeric_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertBinaryPredictions(3, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predictions_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[10, 10],
label_keys=label_keys,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
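      # (All four feature rows are identical, so the loss-minimizing constant
      # probability is the label mean, 1/4.)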
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn.DNNClassifier(
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
n_classes=2,
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn.DNNClassifier(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testPredict_AsIterableFalse(self):
"""Tests predict and predict_prob methods with as_iterable=False."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[10, 10],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predicted_classes = classifier.predict_classes(
input_fn=_input_fn, as_iterable=False)
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = classifier.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllEqual(predicted_classes, predictions)
probabilities = classifier.predict_proba(
input_fn=_input_fn, as_iterable=False)
self._assertProbabilities(3, n_classes, probabilities)
def testPredict_AsIterable(self):
"""Tests predict and predict_prob methods with as_iterable=True."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
n_classes = 3
classifier = dnn.DNNClassifier(
n_classes=n_classes,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=300)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self._assertClassificationPredictions(3, n_classes, predicted_classes)
predictions = list(
classifier.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
predicted_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self._assertProbabilities(3, n_classes, predicted_proba)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
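      # The slice above keeps predictions[:, 1:2], i.e. P(class == 1) per row
      # (end_mask=1 lets the first dimension run to its full extent).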
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn.DNNClassifier(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
model_dir = tempfile.mkdtemp()
classifier = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = dnn.DNNClassifier(
model_dir=model_dir,
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(
language, dimension=1)
]
classifier = dnn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=5)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertIn('dnn/multi_class_head/centered_bias_weight',
classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn.DNNClassifier(
n_classes=3,
feature_columns=cont_features,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=5)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
class DNNRegressorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=dnn.DNNRegressor(
feature_columns=[
feature_column.real_valued_column(
'feature', dimension=4)
],
hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, dnn.DNNRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=cont_features,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column.real_valued_column('', dimension=4)]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn.DNNRegressor(
weight_column_name='w',
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def _assertRegressionOutputs(
self, predictions, expected_shape):
predictions_nparray = np.array(predictions)
self.assertAllEqual(expected_shape, predictions_nparray.shape)
self.assertTrue(np.issubdtype(predictions_nparray.dtype, np.floating))
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self._assertRegressionOutputs(predicted_scores, [3])
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self._assertRegressionOutputs(predicted_scores, [3])
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
    # Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics that use MetricSpec."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn.DNNRegressor(
feature_columns=[feature_column.real_valued_column('x')],
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = dnn.DNNRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[3, 3], config=config)
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=True,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertIn('dnn/regression_head/centered_bias_weight',
regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
sparse_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(
sparse_column, dimension=1),
feature_column.real_valued_column('age')
]
regressor = dnn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
self.assertNotIn('centered_bias_weight', regressor.get_variable_names())
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
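# Usage sketch (illustrative): boston_input_fn returns a (features, labels)
# pair of float32 tensors built from the Boston housing data:
#   features, labels = boston_input_fn()
#   # features: Tensor of shape [506, 13]; labels: Tensor of shape [506, 1]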
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = dnn.DNNRegressor(feature_columns=feature_columns, hidden_units=[3, 3])
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ibayer/fastFM-fork | fastFM/validation.py | 1 | 10668 | # Static versions of non-core sklearn.utils functions.
# Placed here since they are subject to change.
"""Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import numbers
import warnings
import numpy as np
import scipy.sparse as sparse
from functools import wraps
def _check_matrix_is_sparse(func):
"""
Check that input is a scipy sparse matrix and raise warning otherwise.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if 'accept_sparse' in kwargs and not sparse.isspmatrix(args[0]):
            raise TypeError('A dense matrix was passed in, but sparse '
                            'data is required.')
result = func(*args, **kwargs)
return result
return wrapper
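# Usage sketch (illustrative): a function decorated with
# ``_check_matrix_is_sparse`` rejects dense input whenever the
# ``accept_sparse`` keyword is passed, e.g.:
#   check_array(np.eye(3), accept_sparse='csr')  # raises TypeError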
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, order, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
    dtype : string, type or None (default=None)
Data type of result. If None, the dtype of the input is preserved.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse is None:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
sparse_type = spmatrix.format
if dtype is None:
dtype = spmatrix.dtype
if sparse_type in accept_sparse:
# correct type
if dtype == spmatrix.dtype:
# correct dtype
if copy:
spmatrix = spmatrix.copy()
else:
# convert dtype
spmatrix = spmatrix.astype(dtype)
else:
# create new
spmatrix = spmatrix.asformat(accept_sparse[0]).astype(dtype)
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
assert_all_finite(spmatrix.data)
if hasattr(spmatrix, "data"):
spmatrix.data = np.array(spmatrix.data, copy=False, order=order)
return spmatrix
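# Usage sketch (illustrative): a COO matrix is converted to the first
# accepted format:
#   >>> m = sparse.coo_matrix(np.eye(3))
#   >>> _ensure_sparse_format(m, accept_sparse=['csr'], dtype=None,
#   ...                       order=None, copy=False,
#   ...                       force_all_finite=True).format
#   'csr'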
def assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
@_check_matrix_is_sparse
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2d numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
if sparse.issparse(array):
if dtype_numeric:
dtype = None
array = _ensure_sparse_format(array, accept_sparse, dtype, order,
copy, force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
if dtype_numeric:
if hasattr(array, "dtype") and getattr(array.dtype, "kind", None) == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
return array
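# Usage sketch (illustrative): dense input is validated and returned as an
# ndarray, while sparse input must be explicitly whitelisted:
#   >>> check_array([[1., 2.], [3., 4.]]).shape
#   (2, 2)
#   >>> check_array(sparse.csr_matrix(np.eye(3)), accept_sparse='csr').format
#   'csr'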
def check_consistent_length(x1, x2):
    """Return True if x1 and x2 have the same number of rows (first axis)."""
    return x1.shape[0] == x2.shape[0]
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
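# Usage sketch (illustrative):
#   >>> rng = check_random_state(0)        # int seed -> new RandomState
#   >>> check_random_state(rng) is rng     # instances pass through untouched
#   True
#   >>> check_random_state(None) is np.random.mtrand._rand
#   True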
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
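# Usage sketch (illustrative):
#   >>> _num_samples(np.zeros((5, 3)))
#   5
#   >>> _num_samples([0, 1, 2])
#   3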
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/utils/testing.py | 29 | 25405 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
import unittest
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
from nose.tools import raises
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal", "SkipTest"]
_dummy = unittest.TestCase('__init__')
assert_equal = _dummy.assertEqual
assert_not_equal = _dummy.assertNotEqual
assert_true = _dummy.assertTrue
assert_false = _dummy.assertFalse
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_in = _dummy.assertIn
assert_not_in = _dummy.assertNotIn
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_less_equal = _dummy.assertLessEqual
assert_greater_equal = _dummy.assertGreaterEqual
try:
assert_raises_regex = _dummy.assertRaisesRegex
except AttributeError:
# Python 2.7
assert_raises_regex = _dummy.assertRaisesRegexp
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
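# Usage sketch (illustrative): the wrapped function's return value is
# passed through when the expected warning is raised:
#   def _noisy():
#       warnings.warn("deprecated", UserWarning)
#       return 42
#   assert assert_warns(UserWarning, _noisy) == 42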
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Parameters
----------
category : warning class, defaults to Warning.
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager and decorator.
    This class allows one to ignore the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
    category : tuple of warning class, default=Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter("ignore", self.category)
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions.
Parameters
----------
    exceptions : exception or tuple of exception
        The expected exception class(es).
    message : str
        Substring that must appear in the raised error message.
    function : callable
        Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kw : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
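# Usage sketch (illustrative):
#   def _fail():
#       raise ValueError("x must be positive")
#   assert_raise_message(ValueError, "must be positive", _fail)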
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
    {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
"MultiOutputRegressor", "MultiOutputClassifier",
"OutputCodeClassifier", "OneVsRestClassifier",
"RFE", "RFECV", "BaseEnsemble"]
# estimators that cannot be default-constructed sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator) and
c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
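# Usage sketch (illustrative; the exact names depend on the installed
# scikit-learn version):
#   for name, Est in all_estimators(type_filter='classifier'):
#       print(name)  # e.g. 'AdaBoostClassifier', 'BernoulliNB', ...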
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
Classes for whom random_state is deprecated are ignored. Currently DBSCAN
is one such class.
"""
if isinstance(estimator, DBSCAN):
return
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed."""
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing.
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
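# Usage sketch (illustrative): the context manager yields a read-only
# memmapped copy of ``data`` and cleans up its temporary folder on exit:
#   with TempMemmap(np.zeros(10)) as X_readonly:
#       X_readonly.sum()      # reads are fine
#       # X_readonly[0] = 1.  # would raise: assignment destination is read-only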
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
class _named_check(object):
"""Wraps a check to show a useful description
Parameters
----------
check : function
Must have ``__name__`` and ``__call__``
arg_text : str
A summary of arguments to the check
"""
# Setting the description on the function itself can give incorrect results
# in failing tests
def __init__(self, check, arg_text):
self.check = check
self.description = ("{0[1]}.{0[3]}:{1.__name__}({2})".format(
inspect.stack()[1], check, arg_text))
def __call__(self, *args, **kwargs):
return self.check(*args, **kwargs)
| bsd-3-clause |